Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-28 14:41:10 +01:00
Move framework types under framework/types
@@ -27,7 +27,7 @@ import (
 
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-    "sigs.k8s.io/descheduler/pkg/framework"
+    frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
 )
 
 const PluginName = "RemovePodsHavingTooManyRestarts"
@@ -36,15 +36,15 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 type RemovePodsHavingTooManyRestarts struct {
-    handle    framework.Handle
+    handle    frameworktypes.Handle
     args      *RemovePodsHavingTooManyRestartsArgs
     podFilter podutil.FilterFunc
 }
 
-var _ framework.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
+var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
 
 // New builds plugin from its arguments while passing a handle
-func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
     tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
     if !ok {
         return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
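
For context, a minimal sketch of what a descheduler plugin looks like when written against the relocated frameworktypes package. Everything prefixed Minimal here is hypothetical; the constructor signature, the compile-time interface assertion, and the Handle/Plugin/Status types mirror the hunks above.

package minimalplugin

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"

    frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

const MinimalPluginName = "MinimalPlugin"

// MinimalPluginArgs is a hypothetical args type for this sketch.
type MinimalPluginArgs struct {
    metav1.TypeMeta
}

// DeepCopyObject satisfies runtime.Object; a shallow copy suffices for a
// struct that only embeds TypeMeta.
func (in *MinimalPluginArgs) DeepCopyObject() runtime.Object {
    out := *in
    return &out
}

// MinimalPlugin holds the handle and args, like RemovePodsHavingTooManyRestarts above.
type MinimalPlugin struct {
    handle frameworktypes.Handle
    args   *MinimalPluginArgs
}

// Compile-time assertion that the Deschedule extension point is implemented.
var _ frameworktypes.DeschedulePlugin = &MinimalPlugin{}

// New builds the plugin from its arguments while passing a handle, matching
// the constructor signature shown in the diff.
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
    minimalArgs, ok := args.(*MinimalPluginArgs)
    if !ok {
        return nil, fmt.Errorf("want args to be of type MinimalPluginArgs, got %T", args)
    }
    return &MinimalPlugin{handle: handle, args: minimalArgs}, nil
}

func (p *MinimalPlugin) Name() string {
    return MinimalPluginName
}

// Deschedule is a no-op here; a real plugin would list pods per node via
// p.handle.GetPodsAssignedToNodeFunc() and evict the candidates it selects.
func (p *MinimalPlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
    return &frameworktypes.Status{}
}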
@@ -88,13 +88,13 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
 }
 
 // Deschedule extension point implementation for the plugin
-func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
+func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
     for _, node := range nodes {
         klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
         pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
         if err != nil {
             // no pods evicted as error encountered retrieving evictable Pods
-            return &framework.Status{
+            return &frameworktypes.Status{
                 Err: fmt.Errorf("error listing pods on a node: %v", err),
             }
         }
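
Note that the extension point returns a *frameworktypes.Status rather than a plain error. Below is a hedged sketch of how a caller can consume that result; runDeschedulePlugin is a hypothetical helper, and treating a nil status (or a nil Err) as success is an assumption based on the error path above.

// Hypothetical helper: interpret a *frameworktypes.Status result.
// Assumes the same imports as the plugin file (context, fmt, v1, frameworktypes).
func runDeschedulePlugin(ctx context.Context, p frameworktypes.DeschedulePlugin, nodes []*v1.Node) error {
    if status := p.Deschedule(ctx, nodes); status != nil && status.Err != nil {
        return fmt.Errorf("plugin %q failed: %w", p.Name(), status.Err)
    }
    return nil
}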
@@ -31,9 +31,9 @@ import (
 
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-    "sigs.k8s.io/descheduler/pkg/framework"
     frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
     "sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
+    frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
     "sigs.k8s.io/descheduler/test"
 )
@@ -287,7 +287,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
         &frameworkfake.HandleImpl{
             ClientsetImpl:                 fakeClient,
             PodEvictorImpl:                podEvictor,
-            EvictorFilterImpl:             evictorFilter.(framework.EvictorPlugin),
+            EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
             SharedInformerFactoryImpl:     sharedInformerFactory,
             GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
         })
@@ -295,7 +295,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
         t.Fatalf("Unable to initialize the plugin: %v", err)
     }
 
-    plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
+    plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
     actualEvictedPodCount := podEvictor.TotalEvicted()
     if actualEvictedPodCount != tc.expectedEvictedPodCount {
         t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
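
Read together, the test-side hunks wire the plugin through the fake handle roughly as below. The HandleImpl field names come straight from the diff; pluginArgs and the surrounding fixtures (fakeClient, podEvictor, evictorFilter, sharedInformerFactory, getPodsAssignedToNode, ctx, tc) are assumed to be set up earlier in the test, as in the real file.

// Build the plugin against the fake handle, then drive it through the
// frameworktypes.DeschedulePlugin interface as the migrated test does.
handle := &frameworkfake.HandleImpl{
    ClientsetImpl:                 fakeClient,
    PodEvictorImpl:                podEvictor,
    EvictorFilterImpl:             evictorFilter.(frameworktypes.EvictorPlugin),
    SharedInformerFactoryImpl:     sharedInformerFactory,
    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
}
plugin, err := New(pluginArgs, handle) // pluginArgs: *RemovePodsHavingTooManyRestartsArgs built per test case
if err != nil {
    t.Fatalf("Unable to initialize the plugin: %v", err)
}
plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, tc.nodes)
if actual := podEvictor.TotalEvicted(); actual != tc.expectedEvictedPodCount {
    t.Errorf("expected %v pod evictions, got %v", tc.expectedEvictedPodCount, actual)
}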