mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-28 14:41:10 +01:00

Add option to ignore pods with PVCs from eviction

Author: Mike Dame
Date: 2021-01-11 11:19:02 -05:00
Parent: 19424f4119
Commit: c1a63a557a
20 changed files with 107 additions and 1 deletion

View File

@@ -111,6 +111,7 @@ parameters associated with the strategies can be configured too. By default, all
The policy also includes common configuration for all the strategies:
- `nodeSelector` - limiting the nodes which are processed
- `evictLocalStoragePods` - allowing to evict pods with local storage
- `ignorePvcPods` - set whether PVC pods should be evicted or ignored (defaults to `false`)
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
```yaml
@@ -119,6 +120,7 @@ kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictLocalStoragePods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
  ...
```
@@ -497,9 +499,10 @@ When the descheduler decides to evict pods from a node, it employs the following
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set)
* Pods with PVCs are evicted unless `ignorePvcPods: true` is set.
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
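
As a side note on the last bullet: the override works off a plain annotation lookup. Below is a minimal sketch of such a check, assuming only the annotation key quoted above; this is not the repo's own helper, and the imports are the standard Kubernetes API packages.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasEvictAnnotation is a hypothetical stand-in for the descheduler's own
// check: a pod carrying descheduler.alpha.kubernetes.io/evict opts itself in
// to eviction even when other constraints (local storage, PVCs) would block it.
func hasEvictAnnotation(pod *v1.Pod) bool {
	_, ok := pod.Annotations["descheduler.alpha.kubernetes.io/evict"]
	return ok
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:        "example",
		Annotations: map[string]string{"descheduler.alpha.kubernetes.io/evict": ""},
	}}
	fmt.Println(hasEvictAnnotation(pod)) // true
}
```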

View File

@@ -35,6 +35,9 @@ type DeschedulerPolicy struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int
}

View File

@@ -35,6 +35,9 @@ type DeschedulerPolicy struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}
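
The JSON tag is what ties the lowercase `ignorePvcPods` key in the policy file to the `IgnorePVCPods` Go field. Here is a small sketch of that round trip, using a local mirror struct instead of the real `DeschedulerPolicy` type and `sigs.k8s.io/yaml` for decoding; both choices are assumptions made only for illustration.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// policy mirrors just the two fields relevant here; the real type is the
// v1alpha1 DeschedulerPolicy shown in the diff above.
type policy struct {
	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
	IgnorePVCPods         *bool `json:"ignorePvcPods,omitempty"`
}

func main() {
	var p policy
	// Omitting ignorePvcPods leaves the pointer nil, which the descheduler
	// later treats as false (see the defaulting in RunDeschedulerStrategies).
	_ = yaml.Unmarshal([]byte("evictLocalStoragePods: true\n"), &p)
	fmt.Println(p.IgnorePVCPods == nil) // true

	_ = yaml.Unmarshal([]byte("ignorePvcPods: true\n"), &p)
	fmt.Println(*p.IgnorePVCPods) // true
}
```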

View File

@@ -122,6 +122,7 @@ func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
return nil
}
@@ -135,6 +136,7 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Des
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
return nil
}
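
These converters are generated code (by conversion-gen or a similar tool); the `unsafe.Pointer` casts are legal because the internal and v1alpha1 packages declare the fields with identical memory layouts, so the value can be reinterpreted instead of copied. A standalone sketch of the same idea, using hypothetical stand-in types rather than the real API packages:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two distinct named types with identical layout, standing in for the
// internal and v1alpha1 API types.
type internalStrategyList map[string]bool
type v1alpha1StrategyList map[string]bool

func main() {
	in := v1alpha1StrategyList{"PodLifeTime": true}

	// The generated converters reinterpret the value in place rather than
	// copying it, which is safe only because the layouts match exactly.
	out := *(*internalStrategyList)(unsafe.Pointer(&in))
	fmt.Println(out["PodLifeTime"]) // true
}
```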

View File

@@ -45,6 +45,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)

View File

@@ -45,6 +45,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)

View File

@@ -50,6 +50,9 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging componentbaseconfig.LoggingConfiguration

View File

@@ -50,6 +50,9 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`

View File

@@ -56,6 +56,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.Logging = in.Logging
return nil
}
@@ -73,6 +74,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.Logging = in.Logging
return nil
}

View File

@@ -90,6 +90,11 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
@@ -116,6 +121,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
maxNoOfPodsToEvictPerNode,
nodes,
evictLocalStoragePods,
ignorePvcPods,
)
for name, f := range strategyFuncs {
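
The nil checks above are how the policy's pointer fields collapse to concrete defaults; in particular, `ignorePvcPods` falls back to `false` when the key is omitted. The repo keeps those checks inline as shown, so the helper below is only a hypothetical restatement of the pattern:

```go
package main

import "fmt"

// boolOrDefault is a hypothetical helper (not in the repo) expressing the
// defaulting above: a nil pointer, i.e. a field omitted from the policy
// file, falls back to def.
func boolOrDefault(v *bool, def bool) bool {
	if v != nil {
		return *v
	}
	return def
}

func main() {
	var fromPolicy *bool                          // ignorePvcPods left unset in the policy
	fmt.Println(boolOrDefault(fromPolicy, false)) // false

	explicit := true
	fmt.Println(boolOrDefault(&explicit, false)) // true
}
```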

View File

@@ -51,6 +51,7 @@ type PodEvictor struct {
maxPodsToEvictPerNode int
nodepodCount nodePodEvictedCount
evictLocalStoragePods bool
ignorePvcPods bool
}
func NewPodEvictor(
@@ -60,6 +61,7 @@ func NewPodEvictor(
maxPodsToEvictPerNode int,
nodes []*v1.Node,
evictLocalStoragePods bool,
ignorePvcPods bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
for _, node := range nodes {
@@ -74,6 +76,7 @@ func NewPodEvictor(
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
nodepodCount: nodePodCount,
evictLocalStoragePods: evictLocalStoragePods,
ignorePvcPods: ignorePvcPods,
}
}
@@ -189,6 +192,14 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
return nil
})
}
if pe.ignorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if IsPodWithPVC(pod) {
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
}
return nil
})
}
if options.priority != nil {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if IsPodEvictableBasedOnPriority(pod, *options.priority) {
@@ -267,6 +278,15 @@ func IsPodWithLocalStorage(pod *v1.Pod) bool {
return false
}
func IsPodWithPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
return true
}
}
return false
}
// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
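
`IsPodWithPVC` simply scans the pod's volumes for a `PersistentVolumeClaim` source, and the new constraint turns that into an eviction veto when `ignorePvcPods` is set. Here is a self-contained demonstration of the same check, restated locally rather than imported since the package path is not shown in this view:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isPodWithPVC restates the new helper from the diff above: true if any
// volume is backed by a PersistentVolumeClaim.
func isPodWithPVC(pod *v1.Pod) bool {
	for _, volume := range pod.Spec.Volumes {
		if volume.PersistentVolumeClaim != nil {
			return true
		}
	}
	return false
}

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "with-pvc"},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{{
				Name: "data",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
				},
			}},
		},
	}
	// With ignorePvcPods enabled, the evictor's constraint rejects exactly
	// this kind of pod.
	fmt.Println(isPodWithPVC(pod)) // true
}
```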

View File

@@ -210,6 +210,7 @@ func TestFindDuplicatePods(t *testing.T) {
testCase.maxPodsToEvictPerNode,
[]*v1.Node{node1, node2},
false,
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node1, node2}, podEvictor)
@@ -405,6 +406,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
testCase.maxPodsToEvictPerNode,
testCase.nodes,
false,
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)

View File

@@ -444,6 +444,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.maxPodsToEvictPerNode,
nodes,
false,
false,
)
strategy := api.DeschedulerStrategy{
@@ -764,6 +765,7 @@ func TestWithTaints(t *testing.T) {
item.evictionsExpected,
item.nodes,
false,
false,
)
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)

View File

@@ -158,6 +158,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)

View File

@@ -171,6 +171,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
tc.maxPodsToEvictPerNode,
tc.nodes,
tc.evictLocalStoragePods,
false,
)
RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, podEvictor)

View File

@@ -120,6 +120,7 @@ func TestPodAntiAffinity(t *testing.T) {
test.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
false,
)
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, podEvictor)

View File

@@ -98,6 +98,19 @@ func TestPodLifeTime(t *testing.T) {
p9.ObjectMeta.OwnerReferences = ownerRef1
p10.ObjectMeta.OwnerReferences = ownerRef1
p11 := test.BuildTestPod("p11", 100, 0, node.Name, func(pod *v1.Pod) {
pod.Spec.Volumes = []v1.Volume{
{
Name: "pvc", VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
},
},
}
pod.Namespace = "dev"
pod.ObjectMeta.CreationTimestamp = olderPodCreationTime
pod.ObjectMeta.OwnerReferences = ownerRef1
})
var maxLifeTime uint = 600
testCases := []struct {
description string
@@ -105,6 +118,7 @@ func TestPodLifeTime(t *testing.T) {
maxPodsToEvictPerNode int
pods []v1.Pod
expectedEvictedPodCount int
ignorePvcPods bool
}{
{
description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
@@ -169,6 +183,31 @@ func TestPodLifeTime(t *testing.T) {
pods: []v1.Pod{*p9, *p10},
expectedEvictedPodCount: 1,
},
{
description: "does not evict pvc pods with ignorePvcPods set to true",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
expectedEvictedPodCount: 0,
ignorePvcPods: true,
},
{
description: "evicts pvc pods with ignorePvcPods set to false (or unset)",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
expectedEvictedPodCount: 1,
},
}
for _, tc := range testCases {
@@ -186,6 +225,7 @@ func TestPodLifeTime(t *testing.T) {
tc.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
tc.ignorePvcPods,
)
PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
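
The new `p11` fixture leans on `test.BuildTestPod`'s mutator argument to attach the PVC volume. Below is a hypothetical, stripped-down version of that helper pattern; the signature is reduced from what the call sites above show, and the real helper also takes CPU and memory requests.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildTestPod is a hypothetical reduction of the repo's test.BuildTestPod:
// construct a pod on a node, then hand it to an optional mutator so each
// test case can bolt on what it needs (here, a PVC-backed volume).
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.PodSpec{NodeName: nodeName},
	}
	if apply != nil {
		apply(pod)
	}
	return pod
}

func main() {
	p11 := buildTestPod("p11", "node1", func(pod *v1.Pod) {
		pod.Spec.Volumes = []v1.Volume{{
			Name: "pvc",
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
			},
		}}
	})
	fmt.Println(len(p11.Spec.Volumes)) // 1
}
```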

View File

@@ -172,6 +172,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
tc.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)

View File

@@ -368,6 +368,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
100,
tc.nodes,
false,
false,
)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()

View File

@@ -220,6 +220,7 @@ func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface,
0,
nodes,
false,
false,
),
)
}
@@ -648,6 +649,7 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
0,
nodeList,
true,
false,
)
for _, node := range nodeList {
// Skip the Master Node