Mirror of https://github.com/kubernetes-sigs/descheduler.git
Fix build errors
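The hunks below switch the strategy tests from the package-local nodePodEvictedCount type to the exported utils.NodePodEvictedCount and add the matching "sigs.k8s.io/descheduler/pkg/utils" import. The exported type's definition is not part of this diff; as a rough sketch only, it is assumed to be a per-node eviction counter keyed by *v1.Node, since the tests index it with node variables and seed integer counts:

    package utils

    import v1 "k8s.io/api/core/v1"

    // NodePodEvictedCount records how many pods have been evicted from each node.
    // Assumed shape only: the tests below do npe[node] = 0 with node a *v1.Node,
    // so a map from node pointer to an integer count matches the usage shown.
    type NodePodEvictedCount map[*v1.Node]int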
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
 )

@@ -126,7 +127,7 @@ func TestFindDuplicatePods(t *testing.T) {
     for _, testCase := range testCases {

-        npe := nodePodEvictedCount{}
+        npe := utils.NodePodEvictedCount{}
         npe[node] = 0
         fakeClient := &fake.Clientset{}
         fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
@@ -28,6 +28,7 @@ import (
     core "k8s.io/client-go/testing"
     "reflect"
     "sigs.k8s.io/descheduler/pkg/api"
+    "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
 )

@@ -116,7 +117,7 @@ func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
     if len(lowNodes) != 1 {
         t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
     }
-    npe := nodePodEvictedCount{}
+    npe := utils.NodePodEvictedCount{}
     npe[n1] = 0
     npe[n2] = 0
     npe[n3] = 0

@@ -221,7 +222,7 @@ func TestLowNodeUtilizationWithPriorities(t *testing.T) {
     if len(lowNodes) != 1 {
         t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
     }
-    npe := nodePodEvictedCount{}
+    npe := utils.NodePodEvictedCount{}
     npe[n1] = 0
     npe[n2] = 0
     npe[n3] = 0
@@ -25,6 +25,7 @@ import (
     core "k8s.io/client-go/testing"
     "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
+    "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
 )

@@ -92,7 +93,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
         pods []v1.Pod
         strategy api.DeschedulerStrategy
         expectedEvictedPodCount int
-        npe nodePodEvictedCount
+        npe utils.NodePodEvictedCount
         maxPodsToEvict int
     }{
         {

@@ -108,7 +109,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             expectedEvictedPodCount: 0,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
             maxPodsToEvict: 0,
         },
         {

@@ -124,7 +125,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             expectedEvictedPodCount: 0,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
             maxPodsToEvict: 0,
         },
         {

@@ -133,7 +134,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             expectedEvictedPodCount: 0,
             pods: addPodsToNode(nodeWithLabels),
             nodes: []*v1.Node{nodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithLabels: 0},
             maxPodsToEvict: 0,
         },
         {

@@ -142,7 +143,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
             maxPodsToEvict: 0,
         },
         {

@@ -151,7 +152,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
             maxPodsToEvict: 1,
         },
         {

@@ -160,7 +161,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
             pods: addPodsToNode(nodeWithoutLabels),
             nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
-            npe: nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
+            npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
             maxPodsToEvict: 0,
         },
     }
@@ -2,13 +2,15 @@ package strategies

 import (
     "fmt"
+    "testing"
+
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
-    "testing"
 )

 func createNoScheduleTaint(key, value string, index int) v1.Taint {
@@ -96,7 +98,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
         nodes []*v1.Node
         pods []v1.Pod
         evictLocalStoragePods bool
-        npe nodePodEvictedCount
+        npe utils.NodePodEvictedCount
         maxPodsToEvict int
         expectedEvictedPodCount int
     }{

@@ -106,7 +108,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p1, *p2, *p3},
             nodes: []*v1.Node{node1},
             evictLocalStoragePods: false,
-            npe: nodePodEvictedCount{node1: 0},
+            npe: utils.NodePodEvictedCount{node1: 0},
             maxPodsToEvict: 0,
             expectedEvictedPodCount: 1, //p2 gets evicted
         },

@@ -115,7 +117,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p1, *p3, *p4},
             nodes: []*v1.Node{node1},
             evictLocalStoragePods: false,
-            npe: nodePodEvictedCount{node1: 0},
+            npe: utils.NodePodEvictedCount{node1: 0},
             maxPodsToEvict: 0,
             expectedEvictedPodCount: 1, //p4 gets evicted
         },

@@ -124,7 +126,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p1, *p5, *p6},
             nodes: []*v1.Node{node1},
             evictLocalStoragePods: false,
-            npe: nodePodEvictedCount{node1: 0},
+            npe: utils.NodePodEvictedCount{node1: 0},
             maxPodsToEvict: 1,
             expectedEvictedPodCount: 1, //p5 or p6 gets evicted
         },

@@ -133,7 +135,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p7, *p8, *p9, *p10},
             nodes: []*v1.Node{node2},
             evictLocalStoragePods: false,
-            npe: nodePodEvictedCount{node2: 0},
+            npe: utils.NodePodEvictedCount{node2: 0},
             maxPodsToEvict: 0,
             expectedEvictedPodCount: 0,
         },

@@ -142,7 +144,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p7, *p8, *p9, *p10},
             nodes: []*v1.Node{node2},
             evictLocalStoragePods: true,
-            npe: nodePodEvictedCount{node2: 0},
+            npe: utils.NodePodEvictedCount{node2: 0},
             maxPodsToEvict: 0,
             expectedEvictedPodCount: 1,
         },

@@ -151,7 +153,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             pods: []v1.Pod{*p7, *p8, *p10, *p11},
             nodes: []*v1.Node{node2},
             evictLocalStoragePods: false,
-            npe: nodePodEvictedCount{node2: 0},
+            npe: utils.NodePodEvictedCount{node2: 0},
             maxPodsToEvict: 0,
             expectedEvictedPodCount: 1,
         },
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"
 )

@@ -52,7 +53,7 @@ func TestPodAntiAffinity(t *testing.T) {
     fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
         return true, node, nil
     })
-    npe := nodePodEvictedCount{}
+    npe := utils.NodePodEvictedCount{}
     npe[node] = 0
     expectedEvictedPodCount := 3
     podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
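For context on how the updated tests use the counter, here is a minimal, self-contained sketch assuming the map shape suggested above (TestNodePodEvictedCountSketch is illustrative only and not part of the descheduler test suite):

    package strategies

    import (
    	"testing"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	"sigs.k8s.io/descheduler/pkg/utils"
    )

    // TestNodePodEvictedCountSketch shows the counter pattern the hunks above rely on:
    // seed every node with zero, then bump the node's entry as pods are evicted.
    func TestNodePodEvictedCountSketch(t *testing.T) {
    	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}

    	npe := utils.NodePodEvictedCount{}
    	npe[node] = 0 // same seeding the test hunks perform before running a strategy

    	npe[node]++ // a strategy increments the entry when it evicts a pod from the node

    	if npe[node] != 1 {
    		t.Errorf("expected 1 evicted pod recorded for %s, got %d", node.Name, npe[node])
    	}
    }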