# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# CronJob or Deployment
kind: CronJob
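# Hedged example: to run the descheduler continuously instead of on a cron
# schedule, switch kind to Deployment; deschedulingInterval and, for more than
# one replica, leaderElection (both further down) then apply.
# kind: Deployment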

image:
  repository: registry.k8s.io/descheduler/descheduler
  # Overrides the image tag whose default is the chart version
  tag: ""
  pullPolicy: IfNotPresent

imagePullSecrets:
# - name: container-registry-secret

resources:
  requests:
    cpu: 500m
    memory: 256Mi
  # limits:
  #   cpu: 500m
  #   memory: 256Mi

securityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
  privileged: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

nameOverride: ""
fullnameOverride: ""

# Labels that will be applied to all resources
commonLabels: {}

cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished: 600

# Required when running as a Deployment
deschedulingInterval: 5m
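# Assumption: this value is handed to the descheduler as its
# --descheduling-interval flag, so a Deployment re-evaluates the policy every 5m.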

# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set an affinity.podAntiAffinity rule if you want replicas to avoid nodes (or
# zones, depending on the topologyKey) that already run a descheduler; see the
# commented podAntiAffinity example under affinity below
replicas: 1

# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election cannot be enabled when the descheduler runs in dry-run mode
leaderElection: {}
#  enabled: true
#  leaseDuration: 15s
#  renewDeadline: 10s
#  retryPeriod: 2s
#  resourceLock: "leases"
#  resourceName: "descheduler"
#  resourceNamespace: "kube-system"

command:
- "/bin/descheduler"

cmdOptions:
  v: 3
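  # Hedged example: keys under cmdOptions are assumed to be rendered as extra
  # command-line flags for /bin/descheduler; e.g. uncommenting the line below
  # would add the descheduler's --dry-run flag. Verify the rendered manifest
  # for your chart version.
  # dry-run: true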

# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha1"

deschedulerPolicy:
  # nodeSelector: "key1=value1,key2=value2"
  # maxNoOfPodsToEvictPerNode: 10
  # maxNoOfPodsToEvictPerNamespace: 10
  # ignorePvcPods: true
  # evictLocalStoragePods: true
  # tracing:
  #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
  #   transportCert: ""
  #   serviceName: ""
  #   serviceNamespace: ""
  #   sampleRate: 1.0
  #   fallbackToNoOpProviderOnError: true
  strategies:
    RemoveDuplicates:
      enabled: true
    RemovePodsHavingTooManyRestarts:
      enabled: true
      params:
        podsHavingTooManyRestarts:
          podRestartThreshold: 100
          includingInitContainers: true
    RemovePodsViolatingNodeTaints:
      enabled: true
    RemovePodsViolatingNodeAffinity:
      enabled: true
      params:
        nodeAffinityType:
        - requiredDuringSchedulingIgnoredDuringExecution
    RemovePodsViolatingInterPodAntiAffinity:
      enabled: true
    RemovePodsViolatingTopologySpreadConstraint:
      enabled: true
      params:
        includeSoftConstraints: false
    LowNodeUtilization:
      enabled: true
      params:
        nodeResourceUtilizationThresholds:
          thresholds:
            cpu: 20
            memory: 20
            pods: 20
          targetThresholds:
            cpu: 50
            memory: 50
            pods: 50
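    # Hedged sketch of an additional strategy with common per-strategy params
    # (namespace filtering); whether a given strategy accepts these params is
    # version-dependent, so check the descheduler policy docs before enabling.
    # PodLifeTime:
    #   enabled: true
    #   params:
    #     podLifeTime:
    #       maxPodLifeTimeSeconds: 86400
    #     namespaces:
    #       include:
    #       - "dev"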

priorityClassName: system-cluster-critical

nodeSelector: {}
# foo: bar

affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#   - labelSelector:
#       matchExpressions:
#       - key: app.kubernetes.io/name
#         operator: In
#         values:
#         - descheduler
#     topologyKey: "kubernetes.io/hostname"
tolerations: []
# - key: 'management'
#   operator: 'Equal'
#   value: 'tool'
#   effect: 'NoSchedule'

rbac:
  # Specifies whether RBAC resources should be created
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Specifies custom annotations for the serviceAccount
  annotations: {}

podAnnotations: {}

podLabels: {}
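
# Assumption: 10258 is the descheduler's default secure serving port, where
# /healthz is served over HTTPS; adjust the probe if you change that port.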
livenessProbe:
  failureThreshold: 3
  httpGet:
    path: /healthz
    port: 10258
    scheme: HTTPS
  initialDelaySeconds: 3
  periodSeconds: 10

service:
  enabled: false
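  # Assumption: enabling this Service exposes the descheduler's metrics
  # endpoint, giving the ServiceMonitor below something to scrape.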

serviceMonitor:
  enabled: false
  # The namespace where Prometheus expects to find service monitors.
  # namespace: ""
  # Add custom labels to the ServiceMonitor resource
  additionalLabels: {}
    # prometheus: kube-prometheus-stack
  interval: ""
  # honorLabels: true
  insecureSkipVerify: true
  serverName: null
  metricRelabelings: []
    # - action: keep
    #   regex: 'descheduler_(build_info|pods_evicted)'
    #   sourceLabels: [__name__]
  relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace