Mirror of https://github.com/kubernetes-sigs/descheduler.git
Deprecate node-selector, max-pods-to-evict-per-node and evict-local-storage-pods flags and promote them to policy v1alpha1 fields
@@ -64,6 +64,21 @@ Seven strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingI
 are currently implemented. As part of the policy, the parameters associated with the strategies can be configured too.
 By default, all strategies are enabled.
 
+The policy also includes common configuration for all the strategies:
+- `nodeSelector` - limiting the nodes which are processed
+- `evictLocalStoragePods` - allowing to evict pods with local storage
+- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+nodeSelector: prod=dev
+evictLocalStoragePods: true
+maxNoOfPodsToEvictPerNode: 40
+strategies:
+...
+```
+
 ### RemoveDuplicates
 
 This strategy makes sure that there is only one pod associated with a Replica Set (RS),
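Note: as the `RunDeschedulerStrategies` change further below shows, each of these policy fields takes precedence over its deprecated flag when both are set. For example, with the policy above (illustrative invocation; `policy.yaml` is a placeholder name):

```
descheduler --policy-config-file=policy.yaml --node-selector="zone=us-east-1"
```

nodes are still filtered with `prod=dev` from the policy file; the flag's selector is ignored.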
@@ -53,9 +53,9 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
 	// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
-	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
+	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
 	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
-	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
+	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
 	// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
-	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
+	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
 }
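Note: the commit signals deprecation only by prefixing the help text, so the flags remain visible in `--help`. For comparison, pflag also ships a dedicated mechanism, `MarkDeprecated`, which hides a flag from usage output and warns when it is set. A self-contained sketch of that alternative (not what this commit does):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
	nodeSelector := fs.String("node-selector", "", "Selector (label query) to filter on.")

	// MarkDeprecated hides the flag from usage output and prints the given
	// message whenever the flag is actually set on the command line.
	if err := fs.MarkDeprecated("node-selector", "set nodeSelector in the policy file instead"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fs.Parse(os.Args[1:])
	fmt.Println("node-selector:", *nodeSelector)
}
```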
@@ -21,11 +21,11 @@ Available Commands:
   version     Version of descheduler
 
 Flags:
-      --add-dir-header                     If true, adds the file directory to the header
+      --add-dir-header                     If true, adds the file directory to the header of the log messages
       --alsologtostderr                    log to standard error as well as files
       --descheduling-interval duration     Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
       --dry-run                            execute descheduler in dry run mode.
-      --evict-local-storage-pods           Enables evicting pods using local storage by descheduler
+      --evict-local-storage-pods           DEPRECATED: enables evicting pods using local storage by descheduler
   -h, --help                               help for descheduler
       --kubeconfig string                  File with kube configuration.
       --log-backtrace-at traceLocation     when logging hits line file:N, emit a stack trace (default :0)
@@ -34,8 +34,8 @@ Flags:
       --log-file-max-size uint             Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
       --log-flush-frequency duration       Maximum number of seconds between log flushes (default 5s)
       --logtostderr                        log to standard error instead of files (default true)
-      --max-pods-to-evict-per-node int     Limits the maximum number of pods to be evicted per node by descheduler
-      --node-selector string               Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
+      --max-pods-to-evict-per-node int     DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
+      --node-selector string               DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
       --policy-config-file string          File with descheduler policy configuration.
       --skip-headers                       If true, avoid header prefixes in the log messages
       --skip-log-headers                   If true, avoid headers when opening log files
@@ -88,10 +88,10 @@ strategies:
 ```
 
 ### Autoheal Node Problems
 Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
 [Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
 [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
 Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
 problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
 allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
 and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
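Note: a minimal policy enabling just this strategy for the autoheal setup might look like the following sketch, using the v1alpha1 format shown earlier (not part of this commit):

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```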
@@ -28,6 +28,15 @@ type DeschedulerPolicy struct {
 
 	// Strategies
 	Strategies StrategyList
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods *bool
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *int
 }
 
 type StrategyName string
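Note: the new fields are pointers so that `nil` (field absent from the policy file) can be distinguished from an explicit zero value; the `RunDeschedulerStrategies` change below relies on exactly this to decide whether to fall back to the deprecated flag. A minimal sketch of the pattern (the `resolve` helper is hypothetical; the commit inlines these checks):

```go
package main

import "fmt"

// resolve prefers the policy value when present, falling back to the flag value.
func resolve(policyValue *int, flagValue int) int {
	if policyValue != nil {
		return *policyValue // set in the policy file, even if it is 0
	}
	return flagValue // absent from the policy; the deprecated flag still applies
}

func main() {
	zero := 0
	fmt.Println(resolve(nil, 40))   // 40: policy is silent, flag wins
	fmt.Println(resolve(&zero, 40)) // 0: policy explicitly set to 0
}
```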
@@ -28,6 +28,15 @@ type DeschedulerPolicy struct {
 
 	// Strategies
 	Strategies StrategyList `json:"strategies,omitempty"`
+
+	// NodeSelector for a set of nodes to operate over
+	NodeSelector *string `json:"nodeSelector,omitempty"`
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
+
+	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
 }
 
 type StrategyName string
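Note: with pointer fields plus `omitempty`, a policy document that omits a key leaves the pointer `nil` after decoding (descheduler policy files are YAML, decoded through these json tags by the Kubernetes codecs). A standalone illustration of that behavior using encoding/json (cut-down struct, illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Cut-down copy of the v1alpha1 struct for illustration only.
type policy struct {
	NodeSelector              *string `json:"nodeSelector,omitempty"`
	MaxNoOfPodsToEvictPerNode *int    `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}

func main() {
	var p policy
	_ = json.Unmarshal([]byte(`{"maxNoOfPodsToEvictPerNode": 0}`), &p)
	fmt.Println(p.NodeSelector == nil)             // true: key absent, pointer stays nil
	fmt.Println(*p.MaxNoOfPodsToEvictPerNode == 0) // true: key present with explicit 0
}
```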
@@ -110,6 +110,9 @@ func RegisterConversions(s *runtime.Scheme) error {
 
 func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
 	out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
+	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
+	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
+	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	return nil
 }
 
@@ -120,6 +123,9 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
 
 func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
 	out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
+	out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
+	out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
+	out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
 	return nil
 }
 
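Note: these `autoConvert_*` functions follow the pattern of code generated by Kubernetes' conversion-gen; the `unsafe.Pointer` casts are valid because the versioned and internal fields share identical memory layouts, so the conversion is zero-copy. A self-contained illustration of the same trick (hypothetical types, not code from the commit):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two struct types with identical field layout, standing in for the
// v1alpha1 and internal DeschedulerPolicy types (illustration only).
type versioned struct{ NodeSelector *string }
type internal struct{ NodeSelector *string }

func main() {
	sel := "env=prod"
	in := versioned{NodeSelector: &sel}
	// Identical layouts permit a zero-copy reinterpretation of the pointer.
	out := (*internal)(unsafe.Pointer(&in))
	fmt.Println(*out.NodeSelector) // env=prod
}
```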
@@ -35,6 +35,21 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(string)
+		**out = **in
+	}
+	if in.EvictLocalStoragePods != nil {
+		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(int)
+		**out = **in
+	}
 	return
 }
 
@@ -35,6 +35,21 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(string)
+		**out = **in
+	}
+	if in.EvictLocalStoragePods != nil {
+		in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MaxNoOfPodsToEvictPerNode != nil {
+		in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
+		*out = new(int)
+		**out = **in
+	}
 	return
 }
 
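Note: the generated `DeepCopyInto` (same change in both the internal and v1alpha1 packages) allocates a fresh value for every non-nil pointer field so the copy never aliases the original. A quick demonstration of why a plain struct copy would not be enough (cut-down struct, illustration only):

```go
package main

import "fmt"

// Cut-down policy struct for illustration only.
type policy struct{ MaxNoOfPodsToEvictPerNode *int }

func main() {
	n := 40
	orig := policy{MaxNoOfPodsToEvictPerNode: &n}

	shallow := orig // copies the pointer, so both values alias the same int
	deep := policy{}
	if orig.MaxNoOfPodsToEvictPerNode != nil {
		v := *orig.MaxNoOfPodsToEvictPerNode // what the generated code does per field
		deep.MaxNoOfPodsToEvictPerNode = &v
	}

	*shallow.MaxNoOfPodsToEvictPerNode = 10
	fmt.Println(*orig.MaxNoOfPodsToEvictPerNode) // 10: mutated through the shallow copy
	fmt.Println(*deep.MaxNoOfPodsToEvictPerNode) // 40: deep copy is unaffected
}
```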
@@ -20,7 +20,7 @@ import (
 	"context"
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
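Note: this import change looks like a goimports formatting fix; recent goimports adds an explicit name to imports whose path ends in a version-like element such as `v1`, since the package name is not obvious from the path.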
@@ -79,8 +79,23 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 		"PodLifeTime": strategies.PodLifeTime,
 	}
 
+	nodeSelector := rs.NodeSelector
+	if deschedulerPolicy.NodeSelector != nil {
+		nodeSelector = *deschedulerPolicy.NodeSelector
+	}
+
+	evictLocalStoragePods := rs.EvictLocalStoragePods
+	if deschedulerPolicy.EvictLocalStoragePods != nil {
+		evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
+	}
+
+	maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
+	if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
+		maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
+	}
+
 	wait.Until(func() {
-		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
 		if err != nil {
 			klog.V(1).Infof("Unable to get ready nodes: %v", err)
 			close(stopChannel)
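Note: each local above starts from the deprecated flag's value and is overridden only when the corresponding policy field is non-nil, so existing command lines keep working as defaults until users migrate to the policy file.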
@@ -97,9 +112,9 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
 			rs.Client,
 			evictionPolicyGroupVersion,
 			rs.DryRun,
-			rs.MaxNoOfPodsToEvictPerNode,
+			maxNoOfPodsToEvictPerNode,
 			nodes,
-			rs.EvictLocalStoragePods,
+			evictLocalStoragePods,
 		)
 
 		for name, f := range strategyFuncs {