Mirror of https://github.com/kubernetes-sigs/descheduler.git
ReferencedResourceList: alias for map[v1.ResourceName]*resource.Quantity to avoid the type definition duplication
@@ -18,6 +18,7 @@ package api
 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -104,3 +105,6 @@ type MetricsCollector struct {
 	// Later, the collection can be extended to other providers.
 	Enabled bool
 }
+
+// ReferencedResourceList is an adaption of v1.ResourceList with resources as references
+type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
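A minimal, self-contained sketch of what the new alias buys (illustrative package and values, not part of this commit): because the map values are *resource.Quantity pointers, an entry can be updated in place, which v1.ResourceList (value semantics) does not allow.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Same shape as the alias introduced above.
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

func main() {
	usage := ReferencedResourceList{
		v1.ResourceCPU: resource.NewMilliQuantity(500, resource.DecimalSI),
	}
	// The pointer lets callers mutate the quantity in place; with
	// v1.ResourceList (a map of values) the entry would have to be re-assigned.
	usage[v1.ResourceCPU].Add(*resource.NewMilliQuantity(250, resource.DecimalSI))
	fmt.Println(usage[v1.ResourceCPU].MilliValue()) // 750
}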
@@ -32,6 +32,7 @@ import (
 	"k8s.io/klog/v2"
 	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
 	utilptr "k8s.io/utils/ptr"
+	"sigs.k8s.io/descheduler/pkg/api"
 )

 const (
@@ -43,7 +44,7 @@ type MetricsCollector struct {
 	metricsClientset metricsclient.Interface
 	nodeSelector     labels.Selector

-	nodes map[string]map[v1.ResourceName]*resource.Quantity
+	nodes map[string]api.ReferencedResourceList

 	mu sync.RWMutex
 	// hasSynced signals at least one sync succeeded
@@ -55,7 +56,7 @@ func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset me
 		nodeLister:       nodeLister,
 		metricsClientset: metricsClientset,
 		nodeSelector:     nodeSelector,
-		nodes:            make(map[string]map[v1.ResourceName]*resource.Quantity),
+		nodes:            make(map[string]api.ReferencedResourceList),
 	}
 }

@@ -77,13 +78,13 @@ func weightedAverage(prevValue, value int64) int64 {
 	return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
 }

-func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()

-	allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
+	allNodesUsage := make(map[string]api.ReferencedResourceList)
 	for nodeName := range mc.nodes {
-		allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
+		allNodesUsage[nodeName] = api.ReferencedResourceList{
 			v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
 			v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
 		}
@@ -92,7 +93,7 @@ func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*res
 	return allNodesUsage, nil
 }

-func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()

@@ -100,7 +101,7 @@ func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resou
 		klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
 		return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
 	}
-	return map[v1.ResourceName]*resource.Quantity{
+	return api.ReferencedResourceList{
 		v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
 		v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
 	}, nil
@@ -131,7 +132,7 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
 		}

 		if _, exists := mc.nodes[node.Name]; !exists {
-			mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
+			mc.nodes[node.Name] = api.ReferencedResourceList{
 				v1.ResourceCPU:    utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
 				v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
 			}
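The weightedAverage helper in the hunk above is an exponentially weighted moving average. The sketch below replays the same formula with an assumed beta of 0.9; the real constant is defined elsewhere in the file and is not shown in this diff.

package main

import (
	"fmt"
	"math"
)

// Same formula as weightedAverage above; beta here is an assumed smoothing factor.
func weightedAverage(prevValue, value int64) int64 {
	const beta = 0.9
	return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}

func main() {
	// A spike from 500m to 2000m CPU only moves the smoothed value to 650m.
	fmt.Println(weightedAverage(500, 2000)) // 650
}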
@@ -29,10 +29,11 @@ import (
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"

+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/test"
 )

-func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
+func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
 	t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
 	if usage[v1.ResourceCPU].MilliValue() != millicpu {
 		t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
@@ -30,6 +30,7 @@ import (
 	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
+	"sigs.k8s.io/descheduler/pkg/api"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
@@ -244,7 +245,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
 }

 // nodeAvailableResources returns resources mapped to the quanitity available on the node.
-func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
+func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
 	podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
 	if err != nil {
 		return nil, err
@@ -253,7 +254,7 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
 	if err != nil {
 		return nil, err
 	}
-	remainingResources := map[v1.ResourceName]*resource.Quantity{
+	remainingResources := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
@@ -273,8 +274,8 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
 }

 // NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
-func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
-	totalUtilization := map[v1.ResourceName]*resource.Quantity{
+func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
+	totalUtilization := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
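nodeAvailableResources above computes, per resource, available = allocatable - utilization. A small standalone sketch of that arithmetic with assumed values (not part of this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Assumed values: 4 allocatable cores, 2.5 cores already requested by pods.
	allocatableCPU := resource.NewMilliQuantity(4000, resource.DecimalSI)
	requestedCPU := resource.NewMilliQuantity(2500, resource.DecimalSI)

	// available = allocatable - utilization, as in remainingResources above.
	availableCPU := resource.NewMilliQuantity(
		allocatableCPU.MilliValue()-requestedCPU.MilliValue(), resource.DecimalSI)

	fmt.Println(availableCPU.MilliValue()) // 1500
}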
@@ -21,7 +21,6 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -138,7 +137,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}

 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		for name := range totalAvailableUsage {
 			if totalAvailableUsage[name].CmpInt64(0) < 1 {
 				return false
@@ -21,7 +21,6 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"

@@ -168,7 +167,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	}

 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
 			return false
 		}
@@ -37,13 +37,13 @@ import (
 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
 type NodeUsage struct {
 	node    *v1.Node
-	usage   map[v1.ResourceName]*resource.Quantity
+	usage   api.ReferencedResourceList
 	allPods []*v1.Pod
 }

 type NodeThresholds struct {
-	lowResourceThreshold  map[v1.ResourceName]*resource.Quantity
-	highResourceThreshold map[v1.ResourceName]*resource.Quantity
+	lowResourceThreshold  api.ReferencedResourceList
+	highResourceThreshold api.ReferencedResourceList
 }

 type NodeInfo struct {
@@ -51,7 +51,7 @@ type NodeInfo struct {
 	thresholds NodeThresholds
 }

-type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
+type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool

 // NodePodsMap is a set of (node, pods) pairs
 type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -94,8 +94,8 @@ func getNodeThresholds(
 		}

 		nodeThresholdsMap[node.Name] = NodeThresholds{
-			lowResourceThreshold:  map[v1.ResourceName]*resource.Quantity{},
-			highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
+			lowResourceThreshold:  api.ReferencedResourceList{},
+			highResourceThreshold: api.ReferencedResourceList{},
 		}

 		for _, resourceName := range resourceNames {
@@ -206,7 +206,7 @@ func classifyNodes(
 	return lowNodes, highNodes
 }

-func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
+func usageToKeysAndValues(usage api.ReferencedResourceList) []interface{} {
 	// log message in one line
 	keysAndValues := []interface{}{}
 	if quantity, exists := usage[v1.ResourceCPU]; exists {
@@ -241,7 +241,7 @@ func evictPodsFromSourceNodes(
 	usageClient usageClient,
 ) {
 	// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
-	totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
+	totalAvailableUsage := api.ReferencedResourceList{}
 	for _, resourceName := range resourceNames {
 		totalAvailableUsage[resourceName] = &resource.Quantity{}
 	}
@@ -296,7 +296,7 @@ func evictPods(
 	evictableNamespaces *api.Namespaces,
 	inputPods []*v1.Pod,
 	nodeInfo NodeInfo,
-	totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
+	totalAvailableUsage api.ReferencedResourceList,
 	taintsOfLowNodes map[string][]v1.Taint,
 	podEvictor frameworktypes.Evictor,
 	evictOptions evictions.EvictOptions,
@@ -400,7 +400,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {

 // isNodeAboveTargetUtilization checks if a node is overutilized
 // At least one resource has to be above the high threshold
-func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.highResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
@@ -412,7 +412,7 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName

 // isNodeWithLowUtilization checks if a node is underutilized
 // All resources have to be below the low threshold
-func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeWithLowUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.lowResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
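The comparisons in isNodeAboveTargetUtilization and isNodeWithLowUtilization above rely on Quantity.Cmp returning -1 when the receiver is smaller than its argument, so threshold.Cmp(*usage) == -1 reads as "usage exceeds the threshold". A standalone sketch with assumed values (not part of this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Assumed values: high threshold 1500m, observed node usage 1730m.
	threshold := resource.NewMilliQuantity(1500, resource.DecimalSI)
	usage := resource.NewMilliQuantity(1730, resource.DecimalSI)

	// Cmp returns -1 when the receiver is smaller than the argument,
	// i.e. threshold < usage means the node is above the threshold.
	if threshold.Cmp(*usage) == -1 {
		fmt.Println("node is above the high threshold for this resource")
	}
}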
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/descheduler/pkg/api"
 )

 func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
@@ -71,7 +72,7 @@ func TestResourceUsagePercentages(t *testing.T) {
 				},
 			},
 		},
-		usage: map[v1.ResourceName]*resource.Quantity{
+		usage: api.ReferencedResourceList{
 			v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 			v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 			v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
@@ -103,21 +104,21 @@ func TestSortNodesByUsage(t *testing.T) {
 			name: "cpu memory pods",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
@@ -130,17 +131,17 @@ func TestSortNodesByUsage(t *testing.T) {
 			name: "memory",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 					}
 				}),
@@ -26,6 +26,7 @@ import (
 	"k8s.io/klog/v2"
 	utilptr "k8s.io/utils/ptr"

+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -37,9 +38,9 @@ type usageClient interface {
 	// after Balance method is invoked. There's no cache invalidation so each
 	// Balance is expected to get the latest data by invoking sync.
 	sync(nodes []*v1.Node) error
-	nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
+	nodeUtilization(node string) api.ReferencedResourceList
 	pods(node string) []*v1.Pod
-	podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
+	podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
 }

 type requestedUsageClient struct {
@@ -47,7 +48,7 @@ type requestedUsageClient struct {
 	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc

 	_pods            map[string][]*v1.Pod
-	_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+	_nodeUtilization map[string]api.ReferencedResourceList
 }

 var _ usageClient = &requestedUsageClient{}
@@ -62,7 +63,7 @@ func newRequestedUsageClient(
 	}
 }

-func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
 	return s._nodeUtilization[node]
 }

@@ -70,8 +71,8 @@ func (s *requestedUsageClient) pods(node string) []*v1.Pod {
 	return s._pods[node]
 }

-func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
-	usage := make(map[v1.ResourceName]*resource.Quantity)
+func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
+	usage := make(api.ReferencedResourceList)
 	for _, resourceName := range s.resourceNames {
 		usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
 	}
@@ -79,7 +80,7 @@ func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resou
 }

 func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
-	s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+	s._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	s._pods = make(map[string][]*v1.Pod)

 	for _, node := range nodes {
@@ -111,7 +112,7 @@ type actualUsageClient struct {
 	metricsCollector *metricscollector.MetricsCollector

 	_pods            map[string][]*v1.Pod
-	_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
+	_nodeUtilization map[string]api.ReferencedResourceList
 }

 var _ usageClient = &actualUsageClient{}
@@ -128,7 +129,7 @@ func newActualUsageClient(
 	}
 }

-func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
+func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
 	return client._nodeUtilization[node]
 }

@@ -136,7 +137,7 @@ func (client *actualUsageClient) pods(node string) []*v1.Pod {
 	return client._pods[node]
 }

-func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
+func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
 	// It's not efficient to keep track of all pods in a cluster when only their fractions is evicted.
 	// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
 	podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -144,7 +145,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
 		return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
 	}

-	totalUsage := make(map[v1.ResourceName]*resource.Quantity)
+	totalUsage := make(api.ReferencedResourceList)
 	for _, container := range podMetrics.Containers {
 		for _, resourceName := range client.resourceNames {
 			if resourceName == v1.ResourcePods {
@@ -165,7 +166,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
 }

 func (client *actualUsageClient) sync(nodes []*v1.Node) error {
-	client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
+	client._nodeUtilization = make(map[string]api.ReferencedResourceList)
 	client._pods = make(map[string][]*v1.Pod)

 	nodesUsage, err := client.metricsCollector.AllNodesUsage()
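The usageClient interface above is consumed as "sync once per Balance, then read per-node usage from the cached maps". The sketch below mirrors that shape with hypothetical local types; fakeClient and its hard-coded values are illustrative, not descheduler code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

// usageClient mirrors the shape of the interface in the diff above.
type usageClient interface {
	sync(nodes []*v1.Node) error
	nodeUtilization(node string) ReferencedResourceList
}

// fakeClient is a stand-in; a real client aggregates pod requests or metrics.
type fakeClient struct {
	utilization map[string]ReferencedResourceList
}

func (f *fakeClient) sync(nodes []*v1.Node) error {
	f.utilization = map[string]ReferencedResourceList{}
	for _, n := range nodes {
		f.utilization[n.Name] = ReferencedResourceList{
			v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
		}
	}
	return nil
}

func (f *fakeClient) nodeUtilization(node string) ReferencedResourceList {
	return f.utilization[node]
}

func main() {
	node := &v1.Node{}
	node.Name = "node1"

	var c usageClient = &fakeClient{}
	// sync is called once per balancing pass; reads then come from the cached map.
	if err := c.sync([]*v1.Node{node}); err != nil {
		panic(err)
	}
	fmt.Println(c.nodeUtilization("node1")[v1.ResourceCPU].MilliValue()) // 1220
}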