Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-28 06:29:29 +01:00
Improve PodEvictor observability through EvictOptions (#1349)
* feat: profile name for pods_evicted metric
  Support a new "profile" label on the "pods_evicted" metric, to make it possible to understand which profiles are evicting the most pods, for better observability.

* refactor: improved observability through EvictOptions
  Send profile and strategy names in EvictOptions, allowing evictors to access observability information.

* cleanup: remove unnecessary EvictOptions reference

* feat: EvictOptions for nodeutilization
  Explicitly pass options when invoking evictPods from the nodeutilization helper, for both highnodeutilization and lownodeutilization.
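In short: plugins now pass their own strategy name explicitly in EvictOptions, the framework stamps the profile name on top, and PodEvictor attaches both as labels on pods_evicted. A minimal, self-contained sketch of that flow (the types and wiring below are illustrative stand-ins, not the real framework):

package main

import "fmt"

// Simplified stand-ins for the descheduler types touched by this commit.
type EvictOptions struct {
	Reason       string
	ProfileName  string
	StrategyName string
}

type evictor struct{ profileName string }

// Evict stamps the profile name before delegating, mirroring evictorImpl.Evict.
func (e *evictor) Evict(opts EvictOptions) {
	opts.ProfileName = e.profileName
	fmt.Printf("pods_evicted{strategy=%q, profile=%q}\n", opts.StrategyName, opts.ProfileName)
}

func main() {
	ev := &evictor{profileName: "default"} // profile name comes from config.Name
	// A plugin supplies only its own strategy name:
	ev.Evict(EvictOptions{StrategyName: "PodLifeTime"})
	// Output: pods_evicted{strategy="PodLifeTime", profile="default"}
}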
@@ -36,7 +36,7 @@ var (
             Name:           "pods_evicted",
             Help:           "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
             StabilityLevel: metrics.ALPHA,
-        }, []string{"result", "strategy", "namespace", "node"})
+        }, []string{"result", "strategy", "profile", "namespace", "node"})

     buildInfo = metrics.NewGauge(
         &metrics.GaugeOpts{
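This first hunk widens the metric's label set. With Prometheus-style counter vectors, every With(...) call must then supply the new "profile" key too (With panics on a mismatched label set), which is why all PodsEvicted call sites change in the hunks below. A standalone sketch using the plain Prometheus client (the descheduler itself uses the k8s.io/component-base/metrics wrapper; the label values here are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// The widened label set, matching the hunk above.
	podsEvicted := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "pods_evicted",
		Help: "Number of evicted pods ...",
	}, []string{"result", "strategy", "profile", "namespace", "node"})

	// Every call site must now provide "profile" as well, or With panics.
	podsEvicted.With(prometheus.Labels{
		"result":    "success",
		"strategy":  "PodLifeTime",
		"profile":   "default",
		"namespace": "demo",
		"node":      "node-1",
	}).Inc()
}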
@@ -111,6 +111,10 @@ func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
 type EvictOptions struct {
     // Reason allows for passing details about the specific eviction for logging.
     Reason string
+    // ProfileName allows for passing details about profile for observability.
+    ProfileName string
+    // StrategyName allows for passing details about strategy for observability.
+    StrategyName string
 }

 // EvictPod evicts a pod while exercising eviction limits.
@@ -119,16 +123,11 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
     var span trace.Span
     ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
     defer span.End()
-    // TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
-    strategy := ""
-    if ctx.Value("strategyName") != nil {
-        strategy = ctx.Value("strategyName").(string)
-    }

     if pod.Spec.NodeName != "" {
         if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
             if pe.metricsEnabled {
-                metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+                metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
             }
             span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
             klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
@@ -138,7 +137,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio

         if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
             if pe.metricsEnabled {
-                metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+                metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
             }
             span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
             klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
@@ -151,7 +150,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
         span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
         klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
         if pe.metricsEnabled {
-            metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+            metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
         }
         return false
     }
@@ -162,16 +161,16 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
     pe.namespacePodCount[pod.Namespace]++

     if pe.metricsEnabled {
-        metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
+        metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
     }

     if pe.dryRun {
-        klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
+        klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
     } else {
-        klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
+        klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
         reason := opts.Reason
         if len(reason) == 0 {
-            reason = strategy
+            reason = opts.StrategyName
             if len(reason) == 0 {
                 reason = "NotSet"
             }
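With the context lookup gone, the eviction event reason now falls back from the explicit Reason to opts.StrategyName and finally to the literal "NotSet". A small sketch of that chain, extracted for illustration (the helper name is hypothetical; the logic mirrors the hunk above):

package main

import "fmt"

// fallbackReason mirrors the reason-selection logic in EvictPod:
// an explicit Reason wins, then StrategyName, then the literal "NotSet".
func fallbackReason(reason, strategyName string) string {
	if len(reason) == 0 {
		reason = strategyName
		if len(reason) == 0 {
			reason = "NotSet"
		}
	}
	return reason
}

func main() {
	fmt.Println(fallbackReason("pod lifetime exceeded", "PodLifeTime")) // pod lifetime exceeded
	fmt.Println(fallbackReason("", "PodLifeTime"))                      // PodLifeTime
	fmt.Println(fallbackReason("", ""))                                 // NotSet
}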
@@ -25,6 +25,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/klog/v2"
     "sigs.k8s.io/descheduler/pkg/api"
+    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -144,6 +145,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
         sourceNodes,
         highNodes,
         h.handle.Evictor(),
+        evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
         h.podFilter,
         resourceNames,
         continueEvictionCond)
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/klog/v2"
+    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
     frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -191,6 +192,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
         sourceNodes,
         lowNodes,
         l.handle.Evictor(),
+        evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
         l.podFilter,
         resourceNames,
         continueEvictionCond)
@@ -222,6 +222,7 @@ func evictPodsFromSourceNodes(
     evictableNamespaces *api.Namespaces,
     sourceNodes, destinationNodes []NodeInfo,
     podEvictor frameworktypes.Evictor,
+    evictOptions evictions.EvictOptions,
     podFilter func(pod *v1.Pod) bool,
     resourceNames []v1.ResourceName,
     continueEviction continueEvictionCond,
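The nodeutilization helpers gain an evictOptions parameter so the strategy name chosen in Balance survives down to the innermost Evict call, instead of being rebuilt as an empty EvictOptions at the leaf (the two hunks that follow). A toy sketch of the threading pattern, with deliberately simplified types:

package main

import "fmt"

// EvictOptions stands in for evictions.EvictOptions.
type EvictOptions struct{ StrategyName string }

// Before this change, the leaf constructed evictions.EvictOptions{},
// discarding the caller's strategy name. After it, options are threaded
// through the helper chain so the leaf carries what Balance supplied.
func evictPodsFromSourceNodes(opts EvictOptions) { evictPods(opts) }

func evictPods(opts EvictOptions) {
	fmt.Printf("evicting with strategy=%q\n", opts.StrategyName)
}

func main() {
	// e.g. LowNodeUtilization passes its plugin name once, at the top:
	evictPodsFromSourceNodes(EvictOptions{StrategyName: "LowNodeUtilization"})
}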
@@ -273,7 +274,7 @@ func evictPodsFromSourceNodes(
         klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
         // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
         podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
-        evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, continueEviction)
+        evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction)

     }
 }
@@ -286,6 +287,7 @@ func evictPods(
     totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
     taintsOfLowNodes map[string][]v1.Taint,
     podEvictor frameworktypes.Evictor,
+    evictOptions evictions.EvictOptions,
     continueEviction continueEvictionCond,
 ) {
     var excludedNamespaces sets.Set[string]
@@ -310,7 +312,7 @@ func evictPods(
         }

         if preEvictionFilterWithOptions(pod) {
-            if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
+            if podEvictor.Evict(ctx, pod, evictOptions) {
                 klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

                 for name := range totalAvailableUsage {
@@ -133,7 +133,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo

     for _, pod := range podsToEvict {
         if !d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
-            d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+            d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
         }
     }

@@ -210,7 +210,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
             // It's assumed all duplicated pods are in the same priority class
             // TODO(jchaloup): check if the pod has a different node to lend to
             for _, pod := range pods[upperAvg-1:] {
-                r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+                r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
                 if r.handle.Evictor().NodeLimitExceeded(nodeMap[nodeName]) {
                     continue loop
                 }
@@ -103,7 +103,7 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
         }
         totalPods := len(pods)
         for i := 0; i < totalPods; i++ {
-            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
             if d.handle.Evictor().NodeLimitExceeded(node) {
                 break
             }
@@ -123,7 +123,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
         }
         totalPods := len(pods)
         for i := 0; i < totalPods; i++ {
-            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
             if d.handle.Evictor().NodeLimitExceeded(node) {
                 break
             }
@@ -98,7 +98,7 @@ loop:
         totalPods := len(pods)
         for i := 0; i < totalPods; i++ {
             if checkPodsWithAntiAffinityExist(pods[i], podsInANamespace, nodeMap) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
-                if d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{}) {
+                if d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName}) {
                     // Since the current pod is evicted all other pods which have anti-affinity with this
                     // pod need not be evicted.
                     // Update allPods.
@@ -136,7 +136,7 @@ func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, node

         for _, pod := range pods {
             klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
-            d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+            d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
             if d.handle.Evictor().NodeLimitExceeded(node) {
                 break
             }
@@ -112,7 +112,7 @@ func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []
             d.taintFilterFnc,
         ) {
             klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
-            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+            d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
             if d.handle.Evictor().NodeLimitExceeded(node) {
                 break
             }
@@ -235,7 +235,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
             }

             if d.handle.Evictor().PreEvictionFilter(pod) {
-                d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+                d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
             }
             if d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
                 nodeLimitExceeded[pod.Spec.NodeName] = true
@@ -40,6 +40,7 @@ import (
 // evictorImpl implements the Evictor interface so plugins
 // can evict a pod without importing a specific pod evictor
 type evictorImpl struct {
+    profileName       string
     podEvictor        *evictions.PodEvictor
     filter            podutil.FilterFunc
     preEvictionFilter podutil.FilterFunc
@@ -59,6 +60,7 @@ func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {

 // Evict evicts a pod (no pre-check performed)
 func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
+    opts.ProfileName = ei.profileName
    return ei.podEvictor.EvictPod(ctx, pod, opts)
 }

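Because evictorImpl stamps ProfileName unconditionally, the profile label always reflects the profile that owns the evictor; any ProfileName a plugin fills in is overwritten, so plugins should set StrategyName only. A sketch of that precedence (simplified stand-in types):

package main

import "fmt"

type EvictOptions struct{ ProfileName, StrategyName string }

type evictorImpl struct{ profileName string }

// Evict mirrors the framework wrapper above: ProfileName is stamped
// unconditionally, so any plugin-supplied value is discarded.
func (ei *evictorImpl) Evict(opts EvictOptions) EvictOptions {
	opts.ProfileName = ei.profileName
	return opts
}

func main() {
	ei := &evictorImpl{profileName: "default"}
	// Even if a plugin mistakenly sets ProfileName, the profile wins:
	out := ei.Evict(EvictOptions{ProfileName: "from-plugin", StrategyName: "RemovePodsViolatingNodeTaints"})
	fmt.Printf("profile=%q strategy=%q\n", out.ProfileName, out.StrategyName) // profile="default" strategy="RemovePodsViolatingNodeTaints"
}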
@@ -252,7 +254,8 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
         getPodsAssignedToNodeFunc: hOpts.getPodsAssignedToNodeFunc,
         sharedInformerFactory:     hOpts.sharedInformerFactory,
         evictor: &evictorImpl{
-            podEvictor: hOpts.podEvictor,
+            profileName: config.Name,
+            podEvictor:  hOpts.podEvictor,
         },
     }

@@ -307,13 +310,8 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
             ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.DescheduleOperation)))
             defer span.End()
             evicted := d.podEvictor.TotalEvicted()
-            // TODO: strategyName should be accessible from within the strategy using a framework
-            // handle or function which the Evictor has access to. For migration/in-progress framework
-            // work, we are currently passing this via context. To be removed
-            // (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
             strategyStart := time.Now()
-            childCtx := context.WithValue(ctx, "strategyName", pl.Name())
-            status := pl.Deschedule(childCtx, nodes)
+            status := pl.Deschedule(ctx, nodes)
             metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())

             if status != nil && status.Err != nil {
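The deleted TODO block was the context-based workaround: the strategy name travelled via context.WithValue under a built-in string key, which linters such as staticcheck flag (SA1029) and which hides the dependency from function signatures. A before/after sketch of why explicit passing is preferable:

package main

import (
	"context"
	"fmt"
)

// The removed pattern: a built-in string key is easy to collide with and
// invisible in function signatures (staticcheck SA1029 flags string keys
// in context.WithValue).
func oldStyle(ctx context.Context) string {
	if v := ctx.Value("strategyName"); v != nil {
		return v.(string) // an unchecked assertion would also panic on a non-string value
	}
	return ""
}

// The replacement pattern in this commit: pass the name explicitly.
func newStyle(strategyName string) string { return strategyName }

func main() {
	ctx := context.WithValue(context.Background(), "strategyName", "PodLifeTime")
	fmt.Println(oldStyle(ctx))           // PodLifeTime
	fmt.Println(newStyle("PodLifeTime")) // PodLifeTime
}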
@@ -340,13 +338,8 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
             ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.BalanceOperation)))
             defer span.End()
             evicted := d.podEvictor.TotalEvicted()
-            // TODO: strategyName should be accessible from within the strategy using a framework
-            // handle or function which the Evictor has access to. For migration/in-progress framework
-            // work, we are currently passing this via context. To be removed
-            // (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
             strategyStart := time.Now()
-            childCtx := context.WithValue(ctx, "strategyName", pl.Name())
-            status := pl.Balance(childCtx, nodes)
+            status := pl.Balance(ctx, nodes)
             metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())

             if status != nil && status.Err != nil {
@@ -185,7 +185,7 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
             if test.extensionPoint == frameworktypes.DescheduleExtensionPoint {
                 fakePlugin.AddReactor(string(frameworktypes.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
                     if dAction, ok := action.(fakeplugin.DescheduleAction); ok {
-                        if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{}) {
+                        if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{StrategyName: fakePlugin.PluginName}) {
                             return true, false, nil
                         }
                         return true, false, fmt.Errorf("pod not evicted")
@@ -196,7 +196,7 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
             if test.extensionPoint == frameworktypes.BalanceExtensionPoint {
                 fakePlugin.AddReactor(string(frameworktypes.BalanceExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
                     if dAction, ok := action.(fakeplugin.BalanceAction); ok {
-                        if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{}) {
+                        if dAction.Handle().Evictor().Evict(ctx, p1, evictions.EvictOptions{StrategyName: fakePlugin.PluginName}) {
                             return true, false, nil
                         }
                         return true, false, fmt.Errorf("pod not evicted")