Mirror of https://github.com/kubernetes-sigs/descheduler.git

use new events implementation and take recorder out of EvictPod

Author: Lucas Severo Alves
Date: 2022-07-21 16:59:42 +02:00
Committed by: Lucas Severo Alves
Parent: 0d3ff8a84f
Commit: 0aa233415e
20 changed files with 139 additions and 15 deletions

@@ -18,6 +18,8 @@ limitations under the License.
 package options
 import (
+    "time"
     "github.com/spf13/pflag"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     apiserveroptions "k8s.io/apiserver/pkg/server/options"
@@ -27,7 +29,6 @@ import (
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
     deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
-    "time"
 )
 const (
@@ -39,6 +40,7 @@ type DeschedulerServer struct {
     componentconfig.DeschedulerConfiguration
     Client clientset.Interface
+    EventClient clientset.Interface
     SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
     DisableMetrics bool
 }

@@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: descheduler-cluster-role
 rules:
-- apiGroups: [""]
+- apiGroups: ["events.k8s.io"]
   resources: ["events"]
   verbs: ["create", "update"]
 - apiGroups: [""]

@@ -26,7 +26,7 @@ import (
     "k8s.io/client-go/tools/clientcmd"
 )
-func CreateClient(kubeconfig string) (clientset.Interface, error) {
+func CreateClient(kubeconfig string, userAgt string) (clientset.Interface, error) {
     var cfg *rest.Config
     if len(kubeconfig) != 0 {
         master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -47,7 +47,11 @@ func CreateClient(kubeconfig string) (clientset.Interface, error) {
         }
     }
-    return clientset.NewForConfig(cfg)
+    if len(userAgt) != 0 {
+        return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
+    } else {
+        return clientset.NewForConfig(cfg)
+    }
 }
 func GetMasterFromKubeconfig(filename string) (string, error) {

@@ -53,11 +53,12 @@ import (
 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
     metrics.Register()
-    rsclient, err := client.CreateClient(rs.KubeconfigFile)
+    rsclient, eventClient, err := createClients(rs.KubeconfigFile)
     if err != nil {
         return err
     }
     rs.Client = rsclient
+    rs.EventClient = eventClient
     deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
     if err != nil {
@@ -287,6 +288,16 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
     }
+    var eventClient clientset.Interface
+    if rs.DryRun {
+        eventClient = fakeclientset.NewSimpleClientset()
+    } else {
+        eventClient = rs.Client
+    }
+    eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
+    defer eventBroadcaster.Shutdown()
     wait.NonSlidingUntil(func() {
         nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
         if err != nil {
@@ -340,6 +351,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
             deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
             nodes,
             !rs.DisableMetrics,
+            eventRecorder,
         )
         for name, strategy := range deschedulerPolicy.Strategies {
@@ -413,3 +425,17 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
     return nil
 }
+
+func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
+    kClient, err := client.CreateClient(kubeconfig, "descheduler")
+    if err != nil {
+        return nil, nil, err
+    }
+    eventClient, err := client.CreateClient(kubeconfig, "")
+    if err != nil {
+        return nil, nil, err
+    }
+    return kClient, eventClient, nil
+}

@@ -27,6 +27,7 @@ func TestTaintsUpdated(t *testing.T) {
     }
     client := fakeclientset.NewSimpleClientset(n1, n2, p1)
+    eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
     dp := &api.DeschedulerPolicy{
         Strategies: api.StrategyList{
             "RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
@@ -40,6 +41,7 @@ func TestTaintsUpdated(t *testing.T) {
         t.Fatalf("Unable to initialize server: %v", err)
     }
     rs.Client = client
+    rs.EventClient = eventClient
     rs.DeschedulingInterval = 100 * time.Millisecond
     errChan := make(chan error, 1)
     defer close(errChan)
@@ -104,6 +106,7 @@ func TestRootCancel(t *testing.T) {
     n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
     client := fakeclientset.NewSimpleClientset(n1, n2)
+    eventClient := fakeclientset.NewSimpleClientset(n1, n2)
     dp := &api.DeschedulerPolicy{
         Strategies: api.StrategyList{}, // no strategies needed for this test
     }
@@ -113,6 +116,7 @@ func TestRootCancel(t *testing.T) {
         t.Fatalf("Unable to initialize server: %v", err)
     }
     rs.Client = client
+    rs.EventClient = eventClient
     rs.DeschedulingInterval = 100 * time.Millisecond
     errChan := make(chan error, 1)
     defer close(errChan)
@@ -137,6 +141,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
     n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
     client := fakeclientset.NewSimpleClientset(n1, n2)
+    eventClient := fakeclientset.NewSimpleClientset(n1, n2)
     dp := &api.DeschedulerPolicy{
         Strategies: api.StrategyList{}, // no strategies needed for this test
     }
@@ -146,6 +151,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
         t.Fatalf("Unable to initialize server: %v", err)
     }
     rs.Client = client
+    rs.EventClient = eventClient
     rs.DeschedulingInterval = 0
     errChan := make(chan error, 1)
     defer close(errChan)

@@ -27,9 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/errors"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/kubernetes/scheme"
-    clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/klog/v2"
     "sigs.k8s.io/descheduler/metrics"
     nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -57,6 +55,7 @@ type PodEvictor struct {
     nodepodCount nodePodEvictedCount
     namespacePodCount namespacePodEvictCount
     metricsEnabled bool
+    eventRecorder events.EventRecorder
 }
 func NewPodEvictor(
@@ -67,6 +66,7 @@ func NewPodEvictor(
     maxPodsToEvictPerNamespace *uint,
     nodes []*v1.Node,
     metricsEnabled bool,
+    eventRecorder events.EventRecorder,
 ) *PodEvictor {
     var nodePodCount = make(nodePodEvictedCount)
     var namespacePodCount = make(namespacePodEvictCount)
@@ -85,6 +85,7 @@ func NewPodEvictor(
         nodepodCount: nodePodCount,
         namespacePodCount: namespacePodCount,
         metricsEnabled: metricsEnabled,
+        eventRecorder: eventRecorder,
     }
 }
@@ -166,11 +167,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
         klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
     } else {
         klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
-        eventBroadcaster := record.NewBroadcaster()
-        eventBroadcaster.StartStructuredLogging(3)
-        eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
-        r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
-        r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", opts.Reason))
+        reason := opts.Reason
+        if len(reason) == 0 {
+            reason = strategy
+            if len(reason) == 0 {
+                reason = "NotSet"
+            }
+        }
+        pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
     }
     return true
 }
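
With the recorder injected, EvictPod emits the eviction event through pe.eventRecorder instead of building a one-off broadcaster per eviction, and the event reason falls back from opts.Reason to the strategy name to "NotSet". Below is a minimal sketch (not part of the commit) of the new call shape against the FakeRecorder from k8s.io/client-go/tools/events; the buffered channel and the pod object are assumptions made for illustration, while the tests further down simply pass an empty &events.FakeRecorder{}.

// Sketch only: shows the Eventf call shape PodEvictor now relies on.
package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/events"
)

func main() {
    // FakeRecorder collects formatted events on its Events channel when one is set.
    recorder := &events.FakeRecorder{Events: make(chan string, 1)}

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}

    // Argument order: regarding, related, type, reason, action, note.
    recorder.Eventf(pod, nil, v1.EventTypeNormal, "PodLifeTime", "Descheduled", "pod evicted by sigs.k8s.io/descheduler")

    // Prints something like: Normal PodLifeTime pod evicted by sigs.k8s.io/descheduler
    fmt.Println(<-recorder.Events)
}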

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -313,6 +314,8 @@ func TestFindDuplicatePods(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         "v1",
@@ -321,6 +324,7 @@ func TestFindDuplicatePods(t *testing.T) {
         nil,
         testCase.nodes,
         false,
+        eventRecorder,
     )
     nodeFit := false
@@ -751,6 +755,8 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -759,6 +765,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
         nil,
         testCase.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -10,6 +10,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -268,6 +269,8 @@ func TestRemoveFailedPods(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -276,6 +279,7 @@ func TestRemoveFailedPods(t *testing.T) {
         nil,
         tc.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -215,6 +216,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -223,6 +226,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
         tc.maxNoOfPodsToEvictPerNamespace,
         tc.nodes,
         false,
+        eventRecorder,
     )
     nodeFit := false

@@ -28,6 +28,7 @@ import (
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -500,6 +501,8 @@ func TestHighNodeUtilization(t *testing.T) {
     // return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
     //})
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         "v1",
@@ -508,6 +511,7 @@ func TestHighNodeUtilization(t *testing.T) {
         nil,
         testCase.nodes,
         false,
+        eventRecorder,
     )
     strategy := api.DeschedulerStrategy{
@@ -712,6 +716,8 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         "policy/v1",
@@ -720,6 +726,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
         nil,
         item.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -29,6 +29,7 @@ import (
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -765,6 +766,8 @@ func TestLowNodeUtilization(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -773,6 +776,7 @@ func TestLowNodeUtilization(t *testing.T) {
         nil,
         test.nodes,
         false,
+        eventRecorder,
     )
     strategy := api.DeschedulerStrategy{
@@ -1086,6 +1090,8 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -1094,6 +1100,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
         nil,
         item.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -211,6 +212,8 @@ func TestPodAntiAffinity(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -219,6 +222,7 @@ func TestPodAntiAffinity(t *testing.T) {
         test.maxNoOfPodsToEvictPerNamespace,
         test.nodes,
         false,
+        eventRecorder,
     )
     strategy := api.DeschedulerStrategy{
         Params: &api.StrategyParameters{

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -389,6 +390,8 @@ func TestPodLifeTime(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -397,6 +400,7 @@ func TestPodLifeTime(t *testing.T) {
         tc.maxPodsToEvictPerNamespace,
         tc.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -243,6 +244,8 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -251,6 +254,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
         tc.maxNoOfPodsToEvictPerNamespace,
         tc.nodes,
         false,
+        eventRecorder,
     )
     evictorFilter := evictions.NewEvictorFilter(

@@ -13,6 +13,7 @@ import (
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -1215,6 +1216,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
         return false, nil, nil // fallback to the default reactor
     })
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         "v1",
@@ -1223,6 +1226,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
         nil,
         tc.nodes,
         false,
+        eventRecorder,
     )
     nodeFit := false

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/tools/events"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -349,6 +350,8 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
     sharedInformerFactory.Start(ctx.Done())
     sharedInformerFactory.WaitForCacheSync(ctx.Done())
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         fakeClient,
         policyv1.SchemeGroupVersion.String(),
@@ -357,6 +360,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
         tc.maxNoOfPodsToEvictPerNamespace,
         tc.nodes,
         false,
+        eventRecorder,
     )
     handle := &frameworkfake.HandleImpl{

pkg/utils/events.go (new file)

@@ -0,0 +1,15 @@
+package utils
+
+import (
+    "context"
+
+    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/tools/events"
+)
+
+func GetRecorderAndBroadcaster(ctx context.Context, clientset clientset.Interface) (events.EventBroadcasterAdapter, events.EventRecorder) {
+    eventBroadcaster := events.NewEventBroadcasterAdapter(clientset)
+    eventBroadcaster.StartRecordingToSink(ctx.Done())
+    eventRecorder := eventBroadcaster.NewRecorder("sigs.k8s.io.descheduler")
+    return eventBroadcaster, eventRecorder
+}
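
A minimal usage sketch (not part of the commit) for the helper above: it wires the broadcaster adapter to a clientset, emits one event, and shuts the broadcaster down. The fake clientset and the pod are assumptions for illustration; in the commit itself the caller is RunDeschedulerStrategies, which passes either rs.Client or a fake client in dry-run mode.

package main

import (
    "context"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    fakeclientset "k8s.io/client-go/kubernetes/fake"

    "sigs.k8s.io/descheduler/pkg/utils"
)

func main() {
    ctx := context.Background()

    // A fake clientset keeps the sketch self-contained; with a real client the
    // adapter records to the events.k8s.io API (hence the RBAC change above).
    eventClient := fakeclientset.NewSimpleClientset()

    eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
    defer eventBroadcaster.Shutdown()

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
    eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, "PodLifeTime", "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
}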

@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/client-go/tools/events"
     "k8s.io/utils/pointer"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -138,6 +139,9 @@ func TestRemoveDuplicates(t *testing.T) {
     if err != nil || len(evictionPolicyGroupVersion) == 0 {
         t.Fatalf("Error creating eviction policy group %v", err)
     }
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         clientSet,
         evictionPolicyGroupVersion,
@@ -146,6 +150,7 @@ func TestRemoveDuplicates(t *testing.T) {
         nil,
         nodes,
         false,
+        eventRecorder,
     )
     t.Log("Running DeschedulerStrategy strategy")

@@ -36,6 +36,7 @@ import (
     "k8s.io/client-go/informers"
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/tools/events"
     v1qos "k8s.io/kubectl/pkg/util/qos"
     "k8s.io/utils/pointer"
@@ -107,7 +108,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
 }
 func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
-    clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
+    clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"), "")
     if err != nil {
         t.Errorf("Error during client creation with %v", err)
     }
@@ -193,6 +194,8 @@ func runPodLifetimeStrategy(
         t.Fatalf("Failed to get threshold priority from strategy's params")
     }
+    eventRecorder := &events.FakeRecorder{}
     strategies.PodLifeTime(
         ctx,
         clientset,
@@ -206,6 +209,7 @@ func runPodLifetimeStrategy(
             maxPodsToEvictPerNamespace,
             nodes,
             false,
+            eventRecorder,
         ),
         evictions.NewEvictorFilter(
             nodes,
@@ -1035,7 +1039,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {
 func TestDeschedulingInterval(t *testing.T) {
     ctx := context.Background()
-    clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
+    clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"), "")
     if err != nil {
         t.Errorf("Error during client creation with %v", err)
     }
@@ -1424,6 +1428,9 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
     if err != nil || len(evictionPolicyGroupVersion) == 0 {
         t.Fatalf("Error creating eviction policy group: %v", err)
     }
+    eventRecorder := &events.FakeRecorder{}
     return evictions.NewPodEvictor(
         clientSet,
         evictionPolicyGroupVersion,
@@ -1432,5 +1439,6 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
         nil,
         nodes,
         false,
+        eventRecorder,
     )
 }

@@ -28,6 +28,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/tools/events"
     "k8s.io/utils/pointer"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -131,6 +132,9 @@ func TestTooManyRestarts(t *testing.T) {
     if err != nil || len(evictionPolicyGroupVersion) == 0 {
         t.Fatalf("Error creating eviction policy group: %v", err)
     }
+    eventRecorder := &events.FakeRecorder{}
     podEvictor := evictions.NewPodEvictor(
         clientSet,
         evictionPolicyGroupVersion,
@@ -139,6 +143,7 @@ func TestTooManyRestarts(t *testing.T) {
         nil,
         nodes,
         false,
+        eventRecorder,
     )
     // Run RemovePodsHavingTooManyRestarts strategy