Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-28 06:29:29 +01:00)
e2e: TestLeaderElection: delete the lease and increase the retry period
In some random cases, none of the deschedulers acquires a lease, making the test fail.
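
For context, here is a minimal sketch of how a retry period plays into client-go leader election. This sketch is not part of the commit: the helper name, the lease durations, and the callbacks are illustrative assumptions; only the lock name ("descheduler"), namespace ("kube-system"), and RetryPeriod value come from the diff below.

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWithLease is a hypothetical helper each descheduler candidate could run.
// RetryPeriod controls how often a non-leader re-checks the lease, so a
// shorter period lets a standby candidate pick up a freed lease sooner.
func runWithLease(ctx context.Context, client clientset.Interface, id string) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "descheduler", Namespace: "kube-system"},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second, // illustrative value
		RenewDeadline: 10 * time.Second, // illustrative value
		RetryPeriod:   time.Second,      // the value this commit sets for both deschedulers
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* descheduling loop */ },
			OnStoppedLeading: func() {},
		},
	})
}
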
@@ -27,6 +27,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
@@ -85,6 +86,9 @@ func TestLeaderElection(t *testing.T) {
 	s1.Client = clientSet
 	s1.DeschedulingInterval = 5 * time.Second
 	s1.LeaderElection.LeaderElect = true
+	s1.LeaderElection.RetryPeriod = metav1.Duration{
+		Duration: time.Second,
+	}
 	s1.ClientConnection.Kubeconfig = os.Getenv("KUBECONFIG")
 	s1.PolicyConfigFile = "./policy_leaderelection_a.yaml"
 
@@ -95,9 +99,21 @@ func TestLeaderElection(t *testing.T) {
 	s2.Client = clientSet
 	s2.DeschedulingInterval = 5 * time.Second
 	s2.LeaderElection.LeaderElect = true
+	s2.LeaderElection.RetryPeriod = metav1.Duration{
+		Duration: time.Second,
+	}
 	s2.ClientConnection.Kubeconfig = os.Getenv("KUBECONFIG")
 	s2.PolicyConfigFile = "./policy_leaderelection_b.yaml"
 
+	// Delete the descheduler lease
+	err = clientSet.CoordinationV1().Leases("kube-system").Delete(ctx, "descheduler", metav1.DeleteOptions{})
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			t.Fatalf("Unable to remove kube-system/descheduler lease: %v", err)
+		}
+	}
+	t.Logf("Removed kube-system/descheduler lease")
+
 	t.Log("starting deschedulers")
 
 	go func() {
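
Not part of the diff, but when debugging this flake it can help to confirm which candidate actually holds the lease after it is recreated. A sketch against the same clientSet; the log wording is an assumption:

	// Inspect the coordination.k8s.io/v1 Lease the two deschedulers compete for.
	lease, err := clientSet.CoordinationV1().Leases("kube-system").Get(ctx, "descheduler", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Unable to read kube-system/descheduler lease: %v", err)
	}
	if lease.Spec.HolderIdentity != nil {
		t.Logf("Lease currently held by %s", *lease.Spec.HolderIdentity)
	}
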
@@ -141,7 +157,11 @@ func TestLeaderElection(t *testing.T) {
 			t.Logf("Only the pods in %s namespace are evicted. Pods before: %s, Pods after %s", ns2, podListBOrg, podListB)
 		}
 	} else {
-		t.Fatalf("Pods are evicted in both namespaces. For %s namespace Pods before: %s, Pods after %s. And, for %s namespace Pods before: %s, Pods after: %s", ns1, podListAOrg, podListA, ns2, podListBOrg, podListB)
+		if left && right {
+			t.Fatalf("No pods evicted. Probably none of the deschedulers were running.")
+		} else {
+			t.Fatalf("Pods are evicted in both namespaces.\n\tFor %s namespace\n\tPods before: %s,\n\tPods after %s.\n\tAnd, for %s namespace\n\tPods before: %s,\n\tPods after: %s", ns1, podListAOrg, podListA, ns2, podListBOrg, podListB)
+		}
 	}
 }
 