Mirror of https://github.com/kubernetes-sigs/descheduler.git

Bump to k8s 1.25-rc.0

JaneLiuL
2022-08-18 09:11:02 +08:00
committed by Amir Alavi
parent 1eade5bf91
commit c9b0fbe467
967 changed files with 35722 additions and 35029 deletions

View File

@@ -21,7 +21,7 @@ package v1
import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use
@@ -30,8 +30,9 @@ type JobSpecApplyConfiguration struct {
Parallelism *int32 `json:"parallelism,omitempty"`
Completions *int32 `json:"completions,omitempty"`
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"`
BackoffLimit *int32 `json:"backoffLimit,omitempty"`
Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
ManualSelector *bool `json:"manualSelector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
@@ -69,6 +70,14 @@ func (b *JobSpecApplyConfiguration) WithActiveDeadlineSeconds(value int64) *JobS
return b
}
// WithPodFailurePolicy sets the PodFailurePolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodFailurePolicy field is set to the value of the last call.
func (b *JobSpecApplyConfiguration) WithPodFailurePolicy(value *PodFailurePolicyApplyConfiguration) *JobSpecApplyConfiguration {
b.PodFailurePolicy = value
return b
}
// WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BackoffLimit field is set to the value of the last call.
@@ -80,7 +89,7 @@ func (b *JobSpecApplyConfiguration) WithBackoffLimit(value int32) *JobSpecApplyC
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
func (b *JobSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration {
func (b *JobSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration {
b.Selector = value
return b
}
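Usage note: the hunk above renames the meta/v1 apply import to metav1 and threads the new PodFailurePolicy field through JobSpecApplyConfiguration. A minimal sketch of chaining the new setter, not part of this commit; the batchv1ac alias and the demo values are assumptions:

import (
	batchv1 "k8s.io/api/batch/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// buildJobSpec chains the new WithPodFailurePolicy setter onto an ordinary
// JobSpec apply configuration.
func buildJobSpec() *batchv1ac.JobSpecApplyConfiguration {
	return batchv1ac.JobSpec().
		WithBackoffLimit(3).
		WithPodFailurePolicy(batchv1ac.PodFailurePolicy().
			WithRules(batchv1ac.PodFailurePolicyRule().
				WithAction(batchv1.PodFailurePolicyActionFailJob).
				WithOnExitCodes(batchv1ac.PodFailurePolicyOnExitCodesRequirement().
					WithOperator(batchv1.PodFailurePolicyOnExitCodesOpIn).
					WithValues(1, 2))))
}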

View File

@@ -0,0 +1,44 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use
// with apply.
type PodFailurePolicyApplyConfiguration struct {
Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with
// apply.
func PodFailurePolicy() *PodFailurePolicyApplyConfiguration {
return &PodFailurePolicyApplyConfiguration{}
}
// WithRules adds the given value to the Rules field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Rules field.
func (b *PodFailurePolicyApplyConfiguration) WithRules(values ...*PodFailurePolicyRuleApplyConfiguration) *PodFailurePolicyApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithRules")
}
b.Rules = append(b.Rules, *values[i])
}
return b
}

View File

@@ -0,0 +1,63 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/batch/v1"
)
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use
// with apply.
type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct {
ContainerName *string `json:"containerName,omitempty"`
Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"`
Values []int32 `json:"values,omitempty"`
}
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with
// apply.
func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{}
}
// WithContainerName sets the ContainerName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ContainerName field is set to the value of the last call.
func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainerName(value string) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
b.ContainerName = &value
return b
}
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
b.Operator = &value
return b
}
// WithValues adds the given value to the Values field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Values field.
func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithValues(values ...int32) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
for i := range values {
b.Values = append(b.Values, values[i])
}
return b
}

View File

@@ -0,0 +1,52 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use
// with apply.
type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct {
Type *v1.PodConditionType `json:"type,omitempty"`
Status *v1.ConditionStatus `json:"status,omitempty"`
}
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with
// apply.
func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
b.Type = &value
return b
}
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
b.Status = &value
return b
}

View File

@@ -0,0 +1,66 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/batch/v1"
)
// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use
// with apply.
type PodFailurePolicyRuleApplyConfiguration struct {
Action *v1.PodFailurePolicyAction `json:"action,omitempty"`
OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"`
OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"`
}
// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with
// apply.
func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration {
return &PodFailurePolicyRuleApplyConfiguration{}
}
// WithAction sets the Action field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Action field is set to the value of the last call.
func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration {
b.Action = &value
return b
}
// WithOnExitCodes sets the OnExitCodes field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the OnExitCodes field is set to the value of the last call.
func (b *PodFailurePolicyRuleApplyConfiguration) WithOnExitCodes(value *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration {
b.OnExitCodes = value
return b
}
// WithOnPodConditions adds the given value to the OnPodConditions field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OnPodConditions field.
func (b *PodFailurePolicyRuleApplyConfiguration) WithOnPodConditions(values ...*PodFailurePolicyOnPodConditionsPatternApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOnPodConditions")
}
b.OnPodConditions = append(b.OnPodConditions, *values[i])
}
return b
}
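Usage note: the three new builder files compose into a single rule chain. A hedged sketch of a rule keyed on a pod condition; the "DisruptionTarget" string and the aliases are illustrative assumptions, not part of the commit:

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// ignoreDisruptions builds a rule that ignores container failures caused by
// pod disruption, using the generated With* setters added in this commit.
func ignoreDisruptions() *batchv1ac.PodFailurePolicyRuleApplyConfiguration {
	return batchv1ac.PodFailurePolicyRule().
		WithAction(batchv1.PodFailurePolicyActionIgnore).
		WithOnPodConditions(batchv1ac.PodFailurePolicyOnPodConditionsPattern().
			WithType(corev1.PodConditionType("DisruptionTarget")).
			WithStatus(corev1.ConditionTrue))
}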

View File

@@ -30,6 +30,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct {
NodeStageSecretRef *SecretReferenceApplyConfiguration `json:"nodeStageSecretRef,omitempty"`
NodePublishSecretRef *SecretReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"`
ControllerExpandSecretRef *SecretReferenceApplyConfiguration `json:"controllerExpandSecretRef,omitempty"`
NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"`
}
// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with
@@ -115,3 +116,11 @@ func (b *CSIPersistentVolumeSourceApplyConfiguration) WithControllerExpandSecret
b.ControllerExpandSecretRef = value
return b
}
// WithNodeExpandSecretRef sets the NodeExpandSecretRef field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeExpandSecretRef field is set to the value of the last call.
func (b *CSIPersistentVolumeSourceApplyConfiguration) WithNodeExpandSecretRef(value *SecretReferenceApplyConfiguration) *CSIPersistentVolumeSourceApplyConfiguration {
b.NodeExpandSecretRef = value
return b
}
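Usage note: CSIPersistentVolumeSource gains NodeExpandSecretRef for CSI node-expansion secrets. A sketch of wiring it up, assuming the corev1ac alias for the core apply-configuration package and illustrative driver/secret names:

import corev1ac "k8s.io/client-go/applyconfigurations/core/v1"

// newCSISource returns a CSI PV source carrying the new node-expand secret ref.
func newCSISource() *corev1ac.CSIPersistentVolumeSourceApplyConfiguration {
	return corev1ac.CSIPersistentVolumeSource().
		WithDriver("example.csi.vendor.io"). // illustrative driver name
		WithVolumeHandle("vol-0123").
		WithNodeExpandSecretRef(corev1ac.SecretReference().
			WithName("expand-secret").
			WithNamespace("kube-system"))
}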

View File

@@ -61,6 +61,7 @@ type PodSpecApplyConfiguration struct {
TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"`
SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"`
OS *PodOSApplyConfiguration `json:"os,omitempty"`
HostUsers *bool `json:"hostUsers,omitempty"`
}
// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with
@@ -407,3 +408,11 @@ func (b *PodSpecApplyConfiguration) WithOS(value *PodOSApplyConfiguration) *PodS
b.OS = value
return b
}
// WithHostUsers sets the HostUsers field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HostUsers field is set to the value of the last call.
func (b *PodSpecApplyConfiguration) WithHostUsers(value bool) *PodSpecApplyConfiguration {
b.HostUsers = &value
return b
}
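Usage note: PodSpec now exposes HostUsers, the switch for the user-namespaces alpha feature in 1.25. A minimal sketch, assuming the corev1ac alias:

import corev1ac "k8s.io/client-go/applyconfigurations/core/v1"

// userNamespacedPod opts the pod out of the host user namespace.
func userNamespacedPod() *corev1ac.PodSpecApplyConfiguration {
	return corev1ac.PodSpec().
		WithHostUsers(false).
		WithContainers(corev1ac.Container().
			WithName("app").
			WithImage("registry.k8s.io/pause:3.8"))
}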

View File

@@ -40,7 +40,7 @@ type ServiceSpecApplyConfiguration struct {
PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"`
SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"`
IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"`
IPFamilyPolicy *corev1.IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"`
IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"`
AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"`
LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"`
@@ -194,7 +194,7 @@ func (b *ServiceSpecApplyConfiguration) WithIPFamilies(values ...corev1.IPFamily
// WithIPFamilyPolicy sets the IPFamilyPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IPFamilyPolicy field is set to the value of the last call.
func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicyType) *ServiceSpecApplyConfiguration {
func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicy) *ServiceSpecApplyConfiguration {
b.IPFamilyPolicy = &value
return b
}
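Usage note: this is a compile-visible rename only; the setter now takes corev1.IPFamilyPolicy instead of the deprecated IPFamilyPolicyType alias, so callers just update the type name. Sketch, assuming the corev1ac alias:

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// dualStackService passes the renamed IPFamilyPolicy type to the setter.
func dualStackService() *corev1ac.ServiceSpecApplyConfiguration {
	return corev1ac.ServiceSpec().
		WithIPFamilyPolicy(corev1.IPFamilyPolicyPreferDualStack).
		WithIPFamilies(corev1.IPv4Protocol, corev1.IPv6Protocol)
}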

View File

@@ -26,11 +26,14 @@ import (
// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use
// with apply.
type TopologySpreadConstraintApplyConfiguration struct {
MaxSkew *int32 `json:"maxSkew,omitempty"`
TopologyKey *string `json:"topologyKey,omitempty"`
WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
MinDomains *int32 `json:"minDomains,omitempty"`
MaxSkew *int32 `json:"maxSkew,omitempty"`
TopologyKey *string `json:"topologyKey,omitempty"`
WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
MinDomains *int32 `json:"minDomains,omitempty"`
NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"`
NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"`
MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
}
// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with
@@ -78,3 +81,29 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32)
b.MinDomains = &value
return b
}
// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
b.NodeAffinityPolicy = &value
return b
}
// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
b.NodeTaintsPolicy = &value
return b
}
// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field.
func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration {
for i := range values {
b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
}
return b
}
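Usage note: topology spread constraints pick up NodeAffinityPolicy, NodeTaintsPolicy, and MatchLabelKeys in 1.25. A hedged sketch of the new setters, assuming the corev1ac alias and an illustrative label key:

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// zoneSpread uses the new node-inclusion policies and MatchLabelKeys.
func zoneSpread() *corev1ac.TopologySpreadConstraintApplyConfiguration {
	return corev1ac.TopologySpreadConstraint().
		WithMaxSkew(1).
		WithTopologyKey("topology.kubernetes.io/zone").
		WithWhenUnsatisfiable(corev1.DoNotSchedule).
		WithNodeAffinityPolicy(corev1.NodeInclusionPolicyHonor).
		WithNodeTaintsPolicy(corev1.NodeInclusionPolicyIgnore).
		WithMatchLabelKeys("pod-template-hash")
}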

View File

@@ -3041,6 +3041,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: parallelism
type:
scalar: numeric
- name: podFailurePolicy
type:
namedType: io.k8s.api.batch.v1.PodFailurePolicy
- name: selector
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
@@ -3098,6 +3101,58 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.batch.v1.JobSpec
default: {}
- name: io.k8s.api.batch.v1.PodFailurePolicy
map:
fields:
- name: rules
type:
list:
elementType:
namedType: io.k8s.api.batch.v1.PodFailurePolicyRule
elementRelationship: atomic
- name: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement
map:
fields:
- name: containerName
type:
scalar: string
- name: operator
type:
scalar: string
default: ""
- name: values
type:
list:
elementType:
scalar: numeric
elementRelationship: associative
- name: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern
map:
fields:
- name: status
type:
scalar: string
default: ""
- name: type
type:
scalar: string
default: ""
- name: io.k8s.api.batch.v1.PodFailurePolicyRule
map:
fields:
- name: action
type:
scalar: string
default: ""
- name: onExitCodes
type:
namedType: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement
- name: onPodConditions
type:
list:
elementType:
namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern
elementRelationship: atomic
- name: io.k8s.api.batch.v1.UncountedTerminatedPods
map:
fields:
@@ -3565,6 +3620,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: fsType
type:
scalar: string
- name: nodeExpandSecretRef
type:
namedType: io.k8s.api.core.v1.SecretReference
- name: nodePublishSecretRef
type:
namedType: io.k8s.api.core.v1.SecretReference
@@ -5430,6 +5488,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: claimRef
type:
namedType: io.k8s.api.core.v1.ObjectReference
elementRelationship: separable
- name: csi
type:
namedType: io.k8s.api.core.v1.CSIPersistentVolumeSource
@@ -5759,6 +5818,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: hostPID
type:
scalar: boolean
- name: hostUsers
type:
scalar: boolean
- name: hostname
type:
scalar: string
@@ -6855,6 +6917,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: labelSelector
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
- name: matchLabelKeys
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: maxSkew
type:
scalar: numeric
@@ -6862,6 +6930,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: minDomains
type:
scalar: numeric
- name: nodeAffinityPolicy
type:
scalar: string
- name: nodeTaintsPolicy
type:
scalar: string
- name: topologyKey
type:
scalar: string
@@ -9519,6 +9593,41 @@ var schemaYAML = typed.YAMLObject(`types:
- name: number
type:
scalar: numeric
- name: io.k8s.api.networking.v1alpha1.ClusterCIDR
map:
fields:
- name: apiVersion
type:
scalar: string
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: spec
type:
namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec
default: {}
- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec
map:
fields:
- name: ipv4
type:
scalar: string
default: ""
- name: ipv6
type:
scalar: string
default: ""
- name: nodeSelector
type:
namedType: io.k8s.api.core.v1.NodeSelector
- name: perNodeHostBits
type:
scalar: numeric
default: 0
- name: io.k8s.api.networking.v1beta1.HTTPIngressPath
map:
fields:
@@ -10824,6 +10933,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: requiresRepublish
type:
scalar: boolean
- name: seLinuxMount
type:
scalar: boolean
- name: storageCapacity
type:
scalar: boolean
@@ -11176,6 +11288,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: requiresRepublish
type:
scalar: boolean
- name: seLinuxMount
type:
scalar: boolean
- name: storageCapacity
type:
scalar: boolean
@@ -11530,9 +11645,6 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
- name: clusterName
type:
scalar: string
- name: creationTimestamp
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time

View File

@@ -0,0 +1,66 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use
// with apply.
type ListMetaApplyConfiguration struct {
SelfLink *string `json:"selfLink,omitempty"`
ResourceVersion *string `json:"resourceVersion,omitempty"`
Continue *string `json:"continue,omitempty"`
RemainingItemCount *int64 `json:"remainingItemCount,omitempty"`
}
// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with
// apply.
func ListMeta() *ListMetaApplyConfiguration {
return &ListMetaApplyConfiguration{}
}
// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SelfLink field is set to the value of the last call.
func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration {
b.SelfLink = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration {
b.ResourceVersion = &value
return b
}
// WithContinue sets the Continue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Continue field is set to the value of the last call.
func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration {
b.Continue = &value
return b
}
// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RemainingItemCount field is set to the value of the last call.
func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration {
b.RemainingItemCount = &value
return b
}
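Usage note: the new ListMeta builder mirrors the other meta/v1 apply configurations. A tiny sketch with illustrative values, assuming the metav1ac alias:

import metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"

// pagedListMeta shows the new ListMeta builder.
func pagedListMeta() *metav1ac.ListMetaApplyConfiguration {
	return metav1ac.ListMeta().
		WithResourceVersion("12345").
		WithContinue("opaque-continue-token")
}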

View File

@@ -0,0 +1,247 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha1
import (
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use
// with apply.
type ClusterCIDRApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"`
}
// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with
// apply.
func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration {
b := &ClusterCIDRApplyConfiguration{}
b.WithName(name)
b.WithKind("ClusterCIDR")
b.WithAPIVersion("networking.k8s.io/v1alpha1")
return b
}
// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from
// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a
// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API.
// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
return extractClusterCIDR(clusterCIDR, fieldManager, "")
}
// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except
// that it extracts the status subresource applied configuration.
// Experimental!
func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
return extractClusterCIDR(clusterCIDR, fieldManager, "status")
}
func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) {
b := &ClusterCIDRApplyConfiguration{}
err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
b.WithName(clusterCIDR.Name)
b.WithKind("ClusterCIDR")
b.WithAPIVersion("networking.k8s.io/v1alpha1")
return b, nil
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration {
b.Kind = &value
return b
}
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration {
b.APIVersion = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value
return b
}
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value
return b
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value
return b
}
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value
return b
}
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value
return b
}
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value
return b
}
// WithLabels puts the entries into the Labels field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Labels[k] = v
}
return b
}
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Annotations[k] = v
}
return b
}
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
b.OwnerReferences = append(b.OwnerReferences, *values[i])
}
return b
}
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
}
return b
}
func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
}
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration {
b.Spec = value
return b
}

View File

@@ -0,0 +1,70 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/client-go/applyconfigurations/core/v1"
)
// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use
// with apply.
type ClusterCIDRSpecApplyConfiguration struct {
NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"`
IPv4 *string `json:"ipv4,omitempty"`
IPv6 *string `json:"ipv6,omitempty"`
}
// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with
// apply.
func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration {
return &ClusterCIDRSpecApplyConfiguration{}
}
// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeSelector field is set to the value of the last call.
func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration {
b.NodeSelector = value
return b
}
// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PerNodeHostBits field is set to the value of the last call.
func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration {
b.PerNodeHostBits = &value
return b
}
// WithIPv4 sets the IPv4 field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IPv4 field is set to the value of the last call.
func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration {
b.IPv4 = &value
return b
}
// WithIPv6 sets the IPv6 field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IPv6 field is set to the value of the last call.
func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration {
b.IPv6 = &value
return b
}
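Usage note: the ClusterCIDR and ClusterCIDRSpec builders above compose in the usual way. A sketch with assumed alias and illustrative name/CIDR values:

import netv1alpha1ac "k8s.io/client-go/applyconfigurations/networking/v1alpha1"

// exampleClusterCIDR combines the new ClusterCIDR and ClusterCIDRSpec builders.
func exampleClusterCIDR() *netv1alpha1ac.ClusterCIDRApplyConfiguration {
	return netv1alpha1ac.ClusterCIDR("example-cidr").
		WithSpec(netv1alpha1ac.ClusterCIDRSpec().
			WithPerNodeHostBits(8).
			WithIPv4("10.0.0.0/8"))
}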

View File

@@ -32,6 +32,7 @@ type CSIDriverSpecApplyConfiguration struct {
FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
SELinuxMount *bool `json:"seLinuxMount,omitempty"`
}
// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with
@@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI
b.RequiresRepublish = &value
return b
}
// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SELinuxMount field is set to the value of the last call.
func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration {
b.SELinuxMount = &value
return b
}
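Usage note: both the storage/v1 and storage/v1beta1 CSIDriverSpec builders (this hunk and the next) gain the same SELinuxMount setter. A sketch for the v1 variant, assuming the storagev1ac alias:

import storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"

// seLinuxAwareDriver advertises SELinux mount support on a CSI driver spec.
func seLinuxAwareDriver() *storagev1ac.CSIDriverSpecApplyConfiguration {
	return storagev1ac.CSIDriverSpec().
		WithSELinuxMount(true).
		WithRequiresRepublish(false)
}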

View File

@@ -32,6 +32,7 @@ type CSIDriverSpecApplyConfiguration struct {
FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
SELinuxMount *bool `json:"seLinuxMount,omitempty"`
}
// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with
@@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI
b.RequiresRepublish = &value
return b
}
// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SELinuxMount field is set to the value of the last call.
func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration {
b.SELinuxMount = &value
return b
}

View File

@@ -52,6 +52,9 @@ const (
// defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient.
// Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist.
defaultTimeout = 32 * time.Second
// defaultBurst is the default burst to be used with the discovery client's token bucket rate limiter
defaultBurst = 300
)
// DiscoveryInterface holds the methods that discover server-supported API groups,
@@ -456,12 +459,13 @@ func setDiscoveryDefaults(config *restclient.Config) error {
if config.Timeout == 0 {
config.Timeout = defaultTimeout
}
if config.Burst == 0 && config.QPS < 100 {
// if a burst limit is not already configured
if config.Burst == 0 {
// discovery is expected to be bursty, increase the default burst
// to accommodate looking up resource info for many API groups.
// matches burst set by ConfigFlags#ToDiscoveryClient().
// see https://issue.k8s.io/86149
config.Burst = 100
config.Burst = defaultBurst
}
codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
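Behavioural note: previously the burst bump only applied when QPS was below 100; now any config with Burst == 0 is raised to the 300-request default. A sketch of opting out, where baseConfig is an assumed, pre-built rest.Config:

import (
	"k8s.io/client-go/discovery"
	restclient "k8s.io/client-go/rest"
)

// lowBurstDiscovery keeps a smaller burst by setting it before defaulting runs.
func lowBurstDiscovery(baseConfig *restclient.Config) (*discovery.DiscoveryClient, error) {
	cfg := restclient.CopyConfig(baseConfig)
	cfg.Burst = 50 // any non-zero value bypasses the new 300 default
	return discovery.NewDiscoveryClientForConfig(cfg)
}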

View File

@@ -47,6 +47,7 @@ import (
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
networkingv1 "k8s.io/api/networking/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
nodev1 "k8s.io/api/node/v1"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
@@ -272,6 +273,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil
// Group=networking.k8s.io, Version=v1alpha1
case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil
// Group=networking.k8s.io, Version=v1beta1
case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil

View File

@@ -21,6 +21,7 @@ package networking
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
v1 "k8s.io/client-go/informers/networking/v1"
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
v1beta1 "k8s.io/client-go/informers/networking/v1beta1"
)
@@ -28,6 +29,8 @@ import (
type Interface interface {
// V1 provides access to shared informers for resources in V1.
V1() v1.Interface
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
// V1beta1 provides access to shared informers for resources in V1beta1.
V1beta1() v1beta1.Interface
}
@@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface {
return v1.New(g.factory, g.namespace, g.tweakListOptions)
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)

View File

@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1"
cache "k8s.io/client-go/tools/cache"
)
// ClusterCIDRInformer provides access to a shared informer and lister for
// ClusterCIDRs.
type ClusterCIDRInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.ClusterCIDRLister
}
type clusterCIDRInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options)
},
},
&networkingv1alpha1.ClusterCIDR{},
resyncPeriod,
indexers,
)
}
func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer)
}
func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister {
return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer())
}

View File

@@ -0,0 +1,45 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// ClusterCIDRs returns a ClusterCIDRInformer.
ClusterCIDRs() ClusterCIDRInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// ClusterCIDRs returns a ClusterCIDRInformer.
func (v *version) ClusterCIDRs() ClusterCIDRInformer {
return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
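Usage note: with the v1alpha1 group wired into the informer factory, the new shared informer is reachable through the usual chain. A sketch, assuming an existing clientset and stop channel:

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// watchClusterCIDRs wires up the new shared informer through the factory.
func watchClusterCIDRs(clientset kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(clientset, 30*time.Minute)
	ccInformer := factory.Networking().V1alpha1().ClusterCIDRs()
	_ = ccInformer.Lister()   // lister backed by the shared cache
	_ = ccInformer.Informer() // registering it lets Start manage its run loop
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}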

View File

@@ -53,6 +53,7 @@ import (
flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1"
@@ -104,6 +105,7 @@ type Interface interface {
FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface
FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface
NetworkingV1() networkingv1.NetworkingV1Interface
NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface
NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface
NodeV1() nodev1.NodeV1Interface
NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface
@@ -155,6 +157,7 @@ type Clientset struct {
flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client
flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client
networkingV1 *networkingv1.NetworkingV1Client
networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client
networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client
nodeV1 *nodev1.NodeV1Client
nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client
@@ -322,6 +325,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
return c.networkingV1
}
// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client
func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface {
return c.networkingV1alpha1
}
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
return c.networkingV1beta1
@@ -561,6 +569,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
cs.networkingV1alpha1, err = networkingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -672,6 +684,7 @@ func New(c rest.Interface) *Clientset {
cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c)
cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c)
cs.networkingV1 = networkingv1.New(c)
cs.networkingV1alpha1 = networkingv1alpha1.New(c)
cs.networkingV1beta1 = networkingv1beta1.New(c)
cs.nodeV1 = nodev1.New(c)
cs.nodeV1alpha1 = nodev1alpha1.New(c)
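Usage note: the Clientset now exposes the networking/v1alpha1 group, so ClusterCIDRs can be reached through the typed client. A sketch, assuming a pre-built rest.Config:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

// listClusterCIDRs reaches the new group through the typed clientset.
func listClusterCIDRs(ctx context.Context, cfg *restclient.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	_, err = clientset.NetworkingV1alpha1().ClusterCIDRs().List(ctx, metav1.ListOptions{})
	return err
}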

View File

@@ -84,6 +84,8 @@ import (
fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake"
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake"
networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
fakenetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake"
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake"
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
@@ -317,6 +319,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
}
// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client
func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface {
return &fakenetworkingv1alpha1.FakeNetworkingV1alpha1{Fake: &c.Fake}
}
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake}
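Usage note: the fake clientset serves the same new group, which is handy in unit tests. A sketch with an illustrative object name:

import (
	"context"
	"testing"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestClusterCIDRFake exercises the new group through the fake clientset.
func TestClusterCIDRFake(t *testing.T) {
	fc := fake.NewSimpleClientset()
	_, err := fc.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(),
		&networkingv1alpha1.ClusterCIDR{ObjectMeta: metav1.ObjectMeta{Name: "test-cidr"}},
		metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("create: %v", err)
	}
}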

View File

@@ -49,6 +49,7 @@ import (
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
networkingv1 "k8s.io/api/networking/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
nodev1 "k8s.io/api/node/v1"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
@@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
flowcontrolv1beta1.AddToScheme,
flowcontrolv1beta2.AddToScheme,
networkingv1.AddToScheme,
networkingv1alpha1.AddToScheme,
networkingv1beta1.AddToScheme,
nodev1.AddToScheme,
nodev1alpha1.AddToScheme,
@@ -125,14 +127,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

View File

@@ -49,6 +49,7 @@ import (
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
networkingv1 "k8s.io/api/networking/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
nodev1 "k8s.io/api/node/v1"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
@@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
flowcontrolv1beta1.AddToScheme,
flowcontrolv1beta2.AddToScheme,
networkingv1.AddToScheme,
networkingv1alpha1.AddToScheme,
networkingv1beta1.AddToScheme,
nodev1.AddToScheme,
nodev1alpha1.AddToScheme,
@@ -125,14 +127,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

View File

@@ -17,7 +17,7 @@ limitations under the License.
package fake
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
@@ -26,9 +26,11 @@ import (
)
func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
action := core.NewRootCreateAction(eventsResource, event)
var action core.CreateActionImpl
if c.ns != "" {
action = core.NewCreateAction(eventsResource, c.ns, event)
} else {
action = core.NewCreateAction(eventsResource, event.GetNamespace(), event)
}
obj, err := c.Fake.Invokes(action, event)
if obj == nil {
@@ -40,9 +42,11 @@ func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error
// Update replaces an existing event. Returns the copy of the event the server returns, or an error.
func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
action := core.NewRootUpdateAction(eventsResource, event)
var action core.UpdateActionImpl
if c.ns != "" {
action = core.NewUpdateAction(eventsResource, c.ns, event)
} else {
action = core.NewUpdateAction(eventsResource, event.GetNamespace(), event)
}
obj, err := c.Fake.Invokes(action, event)
if obj == nil {
@@ -57,9 +61,11 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error
func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) {
// TODO: Should be configurable to support additional patch strategies.
pt := types.StrategicMergePatchType
action := core.NewRootPatchAction(eventsResource, event.Name, pt, data)
var action core.PatchActionImpl
if c.ns != "" {
action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data)
} else {
action = core.NewPatchAction(eventsResource, event.GetNamespace(), event.Name, pt, data)
}
obj, err := c.Fake.Invokes(action, event)
if obj == nil {
@@ -71,9 +77,11 @@ func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.
// Search returns a list of events matching the specified object.
func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) {
action := core.NewRootListAction(eventsResource, eventsKind, metav1.ListOptions{})
var action core.ListActionImpl
if c.ns != "" {
action = core.NewListAction(eventsResource, eventsKind, c.ns, metav1.ListOptions{})
} else {
action = core.NewListAction(eventsResource, eventsKind, v1.NamespaceDefault, metav1.ListOptions{})
}
obj, err := c.Fake.Invokes(action, &v1.EventList{})
if obj == nil {

View File

@@ -82,7 +82,8 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
// It returns the copy of the event that the server returns, or an error.
// The namespace and name of the target event is deduced from the event.
// The namespace must either match this event client's namespace, or this event client must
// have been created with the "" namespace.
//
// have been created with the "" namespace.
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) {
if e.ns != "" && event.Namespace != e.ns {
return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)

View File

@@ -0,0 +1,197 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha1 "k8s.io/api/networking/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface.
// A group's client should implement this interface.
type ClusterCIDRsGetter interface {
ClusterCIDRs() ClusterCIDRInterface
}
// ClusterCIDRInterface has methods to work with ClusterCIDR resources.
type ClusterCIDRInterface interface {
Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error)
Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error)
Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error)
ClusterCIDRExpansion
}
// clusterCIDRs implements ClusterCIDRInterface
type clusterCIDRs struct {
client rest.Interface
}
// newClusterCIDRs returns a ClusterCIDRs
func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs {
return &clusterCIDRs{
client: c.RESTClient(),
}
}
// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
result = &v1alpha1.ClusterCIDR{}
err = c.client.Get().
Resource("clustercidrs").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.ClusterCIDRList{}
err = c.client.Get().
Resource("clustercidrs").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterCIDRs.
func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("clustercidrs").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
result = &v1alpha1.ClusterCIDR{}
err = c.client.Post().
Resource("clustercidrs").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterCIDR).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
result = &v1alpha1.ClusterCIDR{}
err = c.client.Put().
Resource("clustercidrs").
Name(clusterCIDR.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterCIDR).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Resource("clustercidrs").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("clustercidrs").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterCIDR.
func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
result = &v1alpha1.ClusterCIDR{}
err = c.client.Patch(pt).
Resource("clustercidrs").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
if clusterCIDR == nil {
return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(clusterCIDR)
if err != nil {
return nil, err
}
name := clusterCIDR.Name
if name == nil {
return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
}
result = &v1alpha1.ClusterCIDR{}
err = c.client.Patch(types.ApplyPatchType).
Resource("clustercidrs").
Name(*name).
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
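// Hedged usage sketch (not generated code): exercising the Apply method above
// with a declarative configuration. It assumes the generated constructor
// networkingv1alpha1.ClusterCIDR(name) from the applyconfigurations package
// and a kubernetes.Interface named client obtained elsewhere.
//
//	cidr := networkingv1alpha1.ClusterCIDR("example-cidr").
//		WithLabels(map[string]string{"app": "demo"})
//	applied, err := client.NetworkingV1alpha1().ClusterCIDRs().
//		Apply(ctx, cidr, v1.ApplyOptions{FieldManager: "example-controller", Force: true})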

View File

@@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// package jsonpath is a template engine using jsonpath syntax,
// which can be seen at http://goessner.net/articles/JsonPath/.
// In addition, it has {range} {end} function to iterate list and slice.
package jsonpath // import "k8s.io/client-go/util/jsonpath"
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1

View File

@@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,146 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1alpha1 "k8s.io/api/networking/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
testing "k8s.io/client-go/testing"
)
// FakeClusterCIDRs implements ClusterCIDRInterface
type FakeClusterCIDRs struct {
Fake *FakeNetworkingV1alpha1
}
var clustercidrsResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1alpha1", Resource: "clustercidrs"}
var clustercidrsKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1alpha1", Kind: "ClusterCIDR"}
// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ClusterCIDR), err
}
// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta}
for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested clusterCIDRs.
func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts))
}
// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ClusterCIDR), err
}
// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ClusterCIDR), err
}
// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{})
return err
}
// Patch applies the patch and returns the patched clusterCIDR.
func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ClusterCIDR), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
if clusterCIDR == nil {
return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
}
data, err := json.Marshal(clusterCIDR)
if err != nil {
return nil, err
}
name := clusterCIDR.Name
if name == nil {
return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ClusterCIDR), err
}
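// Hedged sketch of driving this fake from a unit test; the object and names
// are illustrative. fake.NewSimpleClientset comes from
// k8s.io/client-go/kubernetes/fake.
//
//	cs := fake.NewSimpleClientset()
//	_, err := cs.NetworkingV1alpha1().ClusterCIDRs().Create(ctx,
//		&v1alpha1.ClusterCIDR{ObjectMeta: v1.ObjectMeta{Name: "test-cidr"}},
//		v1.CreateOptions{})
//	got, err := cs.NetworkingV1alpha1().ClusterCIDRs().Get(ctx, "test-cidr", v1.GetOptions{})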

View File

@@ -0,0 +1,40 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeNetworkingV1alpha1 struct {
*testing.Fake
}
func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface {
return &FakeClusterCIDRs{c}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -0,0 +1,21 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type ClusterCIDRExpansion interface{}
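// Hedged note (pattern, not generated content): the empty expansion interface
// is the hook for hand-written helpers. A hypothetical non-generated file in
// this package could widen it, mirroring other *_expansion.go files in
// client-go:
//
//	type ClusterCIDRExpansion interface {
//		// ListByLabel is purely illustrative.
//		ListByLabel(ctx context.Context, selector string) (*v1alpha1.ClusterCIDRList, error)
//	}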

View File

@@ -0,0 +1,107 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"net/http"
v1alpha1 "k8s.io/api/networking/v1alpha1"
"k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type NetworkingV1alpha1Interface interface {
RESTClient() rest.Interface
ClusterCIDRsGetter
}
// NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group.
type NetworkingV1alpha1Client struct {
restClient rest.Interface
}
func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface {
return newClusterCIDRs(c)
}
// NewForConfig creates a new NetworkingV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*NetworkingV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new NetworkingV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &NetworkingV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new NetworkingV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new NetworkingV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *NetworkingV1alpha1Client {
return &NetworkingV1alpha1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *NetworkingV1alpha1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
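// Hedged usage sketch: constructing this group client directly instead of
// going through the aggregate kubernetes.Clientset. cfg is an assumed
// *rest.Config and metav1 the usual alias for apimachinery's meta/v1.
//
//	nc, err := NewForConfig(cfg)
//	if err != nil {
//		return err
//	}
//	cidr, err := nc.ClusterCIDRs().Get(ctx, "example-cidr", metav1.GetOptions{})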

View File

@@ -0,0 +1,68 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "k8s.io/api/networking/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ClusterCIDRLister helps list ClusterCIDRs.
// All objects returned here must be treated as read-only.
type ClusterCIDRLister interface {
// List lists all ClusterCIDRs in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error)
// Get retrieves the ClusterCIDR from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha1.ClusterCIDR, error)
ClusterCIDRListerExpansion
}
// clusterCIDRLister implements the ClusterCIDRLister interface.
type clusterCIDRLister struct {
indexer cache.Indexer
}
// NewClusterCIDRLister returns a new ClusterCIDRLister.
func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister {
return &clusterCIDRLister{indexer: indexer}
}
// List lists all ClusterCIDRs in the indexer.
func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.ClusterCIDR))
})
return ret, err
}
// Get retrieves the ClusterCIDR from the index for a given name.
func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name)
}
return obj.(*v1alpha1.ClusterCIDR), nil
}
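// Hedged sketch of backing the lister with a store; in practice the indexer is
// usually the one maintained by a shared informer for ClusterCIDRs, and metav1
// is the usual alias for apimachinery's meta/v1.
//
//	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
//	_ = indexer.Add(&v1alpha1.ClusterCIDR{ObjectMeta: metav1.ObjectMeta{Name: "example-cidr"}})
//	lister := NewClusterCIDRLister(indexer)
//	cidr, err := lister.Get("example-cidr")
//	all, listErr := lister.List(labels.Everything())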

View File

@@ -0,0 +1,23 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// ClusterCIDRListerExpansion allows custom methods to be added to
// ClusterCIDRLister.
type ClusterCIDRListerExpansion interface{}

View File

@@ -1,56 +0,0 @@
# Azure Active Directory plugin for client authentication
This plugin provides an integration with the Azure Active Directory device flow. If no tokens are present in the kubectl configuration, it will display a device code that can be used to log in via a browser. After login it will automatically fetch the tokens and store them in the kubectl configuration. In addition, it will refresh and update the tokens in the configuration when they expire.
## Usage
1. Create an Azure Active Directory *Web App / API* application for `apiserver` following these [instructions](https://docs.microsoft.com/en-us/azure/active-directory/active-directory-app-registration). The callback URL does not matter (just cannot be empty).
2. Create a second Azure Active Directory native application for `kubectl`. The callback URL does not matter (just cannot be empty).
3. On the `kubectl` application's configuration page in the Azure portal, grant permissions to the `apiserver` application by clicking *Required Permissions*, then click the *Add* button and search for the apiserver application created in step 1. Select "Access apiserver" under *DELEGATED PERMISSIONS*. Once added, click the *Grant Permissions* button to apply the changes.
4. Configure the `apiserver` to use Azure Active Directory as an OIDC provider with the following options:
```
--oidc-client-id="spn:APISERVER_APPLICATION_ID" \
--oidc-issuer-url="https://sts.windows.net/TENANT_ID/" \
--oidc-username-claim="sub"
```
* Replace `APISERVER_APPLICATION_ID` with the application ID of the `apiserver` application
* Replace `TENANT_ID` with your tenant ID.
  * For a list of alternative username claims supported by the OIDC issuer, check the JSON response at `https://sts.windows.net/TENANT_ID/.well-known/openid-configuration`.
5. Configure `kubectl` to use the `azure` authentication provider
```
kubectl config set-credentials "USER_NAME" --auth-provider=azure \
--auth-provider-arg=environment=AzurePublicCloud \
--auth-provider-arg=client-id=APPLICATION_ID \
--auth-provider-arg=tenant-id=TENANT_ID \
--auth-provider-arg=apiserver-id=APISERVER_APPLICATION_ID
```
* Supported environments: `AzurePublicCloud`, `AzureUSGovernmentCloud`, `AzureChinaCloud`, `AzureGermanCloud`
* Replace `USER_NAME` and `TENANT_ID` with your user name and tenant ID
* Replace `APPLICATION_ID` with the application ID of your `kubectl` application
* Replace `APISERVER_APPLICATION_ID` with the application ID of your `apiserver` application
* Be sure to also (create and) select a context that uses the above user
6. (Optional) The AAD token's `aud` claim has an `spn:` prefix. To omit it, add the following auth configuration:
```
--auth-provider-arg=config-mode="1"
```
7. The access token is acquired when the first `kubectl` command is executed
```
kubectl get pods
To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code DEC7D48GA to authenticate.
```
* After signing in via a web browser, the token is stored in the configuration and reused for subsequent commands.
* The resulting username in Kubernetes depends on your [configuration of the `--oidc-username-claim` and `--oidc-username-prefix` flags on the API server](https://kubernetes.io/docs/admin/authentication/#configuring-the-api-server). If you are using any authorization method you need to give permissions to that user, e.g. by binding the user to a role in the case of RBAC.

View File

@@ -1,477 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"sync"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/net"
restclient "k8s.io/client-go/rest"
)
type configMode int
const (
azureTokenKey = "azureTokenKey"
tokenType = "Bearer"
authHeader = "Authorization"
cfgClientID = "client-id"
cfgTenantID = "tenant-id"
cfgAccessToken = "access-token"
cfgRefreshToken = "refresh-token"
cfgExpiresIn = "expires-in"
cfgExpiresOn = "expires-on"
cfgEnvironment = "environment"
cfgApiserverID = "apiserver-id"
cfgConfigMode = "config-mode"
configModeDefault configMode = 0
configModeOmitSPNPrefix configMode = 1
)
func init() {
if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil {
klog.Fatalf("Failed to register azure auth plugin: %v", err)
}
}
var cache = newAzureTokenCache()
type azureTokenCache struct {
lock sync.Mutex
cache map[string]*azureToken
}
func newAzureTokenCache() *azureTokenCache {
return &azureTokenCache{cache: make(map[string]*azureToken)}
}
func (c *azureTokenCache) getToken(tokenKey string) *azureToken {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache[tokenKey]
}
func (c *azureTokenCache) setToken(tokenKey string, token *azureToken) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache[tokenKey] = token
}
var warnOnce sync.Once
func newAzureAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
// deprecated in v1.22, remove in v1.25
warnOnce.Do(func() {
klog.Warningf(`WARNING: the azure auth plugin is deprecated in v1.22+, unavailable in v1.25+; use https://github.com/Azure/kubelogin instead.
To learn more, consult https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins`)
})
var (
ts tokenSource
environment azure.Environment
err error
mode configMode
)
environment, err = azure.EnvironmentFromName(cfg[cfgEnvironment])
if err != nil {
environment = azure.PublicCloud
}
mode = configModeDefault
if cfg[cfgConfigMode] != "" {
configModeInt, err := strconv.Atoi(cfg[cfgConfigMode])
if err != nil {
return nil, fmt.Errorf("failed to parse %s, error: %s", cfgConfigMode, err)
}
mode = configMode(configModeInt)
switch mode {
case configModeOmitSPNPrefix:
case configModeDefault:
default:
return nil, fmt.Errorf("%s:%s is not a valid mode", cfgConfigMode, cfg[cfgConfigMode])
}
}
ts, err = newAzureTokenSourceDeviceCode(environment, cfg[cfgClientID], cfg[cfgTenantID], cfg[cfgApiserverID], mode)
if err != nil {
return nil, fmt.Errorf("creating a new azure token source for device code authentication: %v", err)
}
cacheSource := newAzureTokenSource(ts, cache, cfg, mode, persister)
return &azureAuthProvider{
tokenSource: cacheSource,
}, nil
}
type azureAuthProvider struct {
tokenSource tokenSource
}
func (p *azureAuthProvider) Login() error {
return errors.New("not yet implemented")
}
func (p *azureAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
return &azureRoundTripper{
tokenSource: p.tokenSource,
roundTripper: rt,
}
}
type azureRoundTripper struct {
tokenSource tokenSource
roundTripper http.RoundTripper
}
var _ net.RoundTripperWrapper = &azureRoundTripper{}
func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get(authHeader)) != 0 {
return r.roundTripper.RoundTrip(req)
}
token, err := r.tokenSource.Token()
if err != nil {
klog.Errorf("Failed to acquire a token: %v", err)
return nil, fmt.Errorf("acquiring a token for authorization header: %v", err)
}
// clone the request in order to avoid modifying the headers of the original request
req2 := new(http.Request)
*req2 = *req
req2.Header = make(http.Header, len(req.Header))
for k, s := range req.Header {
req2.Header[k] = append([]string(nil), s...)
}
req2.Header.Set(authHeader, fmt.Sprintf("%s %s", tokenType, token.token.AccessToken))
return r.roundTripper.RoundTrip(req2)
}
func (r *azureRoundTripper) WrappedRoundTripper() http.RoundTripper { return r.roundTripper }
type azureToken struct {
token adal.Token
environment string
clientID string
tenantID string
apiserverID string
}
type tokenSource interface {
Token() (*azureToken, error)
Refresh(*azureToken) (*azureToken, error)
}
type azureTokenSource struct {
source tokenSource
cache *azureTokenCache
lock sync.Mutex
configMode configMode
cfg map[string]string
persister restclient.AuthProviderConfigPersister
}
func newAzureTokenSource(source tokenSource, cache *azureTokenCache, cfg map[string]string, configMode configMode, persister restclient.AuthProviderConfigPersister) tokenSource {
return &azureTokenSource{
source: source,
cache: cache,
cfg: cfg,
persister: persister,
configMode: configMode,
}
}
// Token fetches a token from the cache or configuration if present; otherwise it
// acquires a new token from the configured source. Automatically refreshes
// the token if expired.
func (ts *azureTokenSource) Token() (*azureToken, error) {
ts.lock.Lock()
defer ts.lock.Unlock()
var err error
token := ts.cache.getToken(azureTokenKey)
if token != nil && !token.token.IsExpired() {
return token, nil
}
// retrieve from config if no cache
if token == nil {
tokenFromCfg, err := ts.retrieveTokenFromCfg()
if err == nil {
token = tokenFromCfg
}
}
if token != nil {
// cache and return if the token is still good;
// this avoids frequent persister calls
if !token.token.IsExpired() {
ts.cache.setToken(azureTokenKey, token)
return token, nil
}
klog.V(4).Info("Refreshing token.")
tokenFromRefresh, err := ts.Refresh(token)
switch {
case err == nil:
token = tokenFromRefresh
case autorest.IsTokenRefreshError(err):
klog.V(4).Infof("Failed to refresh expired token, proceed to auth: %v", err)
// reset token to nil so that the token source will be used to acquire new
token = nil
default:
return nil, fmt.Errorf("unexpected error when refreshing token: %v", err)
}
}
if token == nil {
tokenFromSource, err := ts.source.Token()
if err != nil {
return nil, fmt.Errorf("failed acquiring new token: %v", err)
}
token = tokenFromSource
}
// sanity check
if token == nil {
return nil, fmt.Errorf("unable to acquire token")
}
// corner condition, newly got token is valid but expired
if token.token.IsExpired() {
return nil, fmt.Errorf("newly acquired token is expired")
}
err = ts.storeTokenInCfg(token)
if err != nil {
return nil, fmt.Errorf("storing the refreshed token in configuration: %v", err)
}
ts.cache.setToken(azureTokenKey, token)
return token, nil
}
func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) {
accessToken := ts.cfg[cfgAccessToken]
if accessToken == "" {
return nil, fmt.Errorf("no access token in cfg: %s", cfgAccessToken)
}
refreshToken := ts.cfg[cfgRefreshToken]
if refreshToken == "" {
return nil, fmt.Errorf("no refresh token in cfg: %s", cfgRefreshToken)
}
environment := ts.cfg[cfgEnvironment]
if environment == "" {
return nil, fmt.Errorf("no environment in cfg: %s", cfgEnvironment)
}
clientID := ts.cfg[cfgClientID]
if clientID == "" {
return nil, fmt.Errorf("no client ID in cfg: %s", cfgClientID)
}
tenantID := ts.cfg[cfgTenantID]
if tenantID == "" {
return nil, fmt.Errorf("no tenant ID in cfg: %s", cfgTenantID)
}
resourceID := ts.cfg[cfgApiserverID]
if resourceID == "" {
return nil, fmt.Errorf("no apiserver ID in cfg: %s", cfgApiserverID)
}
expiresIn := ts.cfg[cfgExpiresIn]
if expiresIn == "" {
return nil, fmt.Errorf("no expiresIn in cfg: %s", cfgExpiresIn)
}
expiresOn := ts.cfg[cfgExpiresOn]
if expiresOn == "" {
return nil, fmt.Errorf("no expiresOn in cfg: %s", cfgExpiresOn)
}
tokenAudience := resourceID
if ts.configMode == configModeDefault {
tokenAudience = fmt.Sprintf("spn:%s", resourceID)
}
return &azureToken{
token: adal.Token{
AccessToken: accessToken,
RefreshToken: refreshToken,
ExpiresIn: json.Number(expiresIn),
ExpiresOn: json.Number(expiresOn),
NotBefore: json.Number(expiresOn),
Resource: tokenAudience,
Type: tokenType,
},
environment: environment,
clientID: clientID,
tenantID: tenantID,
apiserverID: resourceID,
}, nil
}
func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error {
newCfg := make(map[string]string)
newCfg[cfgAccessToken] = token.token.AccessToken
newCfg[cfgRefreshToken] = token.token.RefreshToken
newCfg[cfgEnvironment] = token.environment
newCfg[cfgClientID] = token.clientID
newCfg[cfgTenantID] = token.tenantID
newCfg[cfgApiserverID] = token.apiserverID
newCfg[cfgExpiresIn] = string(token.token.ExpiresIn)
newCfg[cfgExpiresOn] = string(token.token.ExpiresOn)
newCfg[cfgConfigMode] = strconv.Itoa(int(ts.configMode))
err := ts.persister.Persist(newCfg)
if err != nil {
return fmt.Errorf("persisting the configuration: %v", err)
}
ts.cfg = newCfg
return nil
}
func (ts *azureTokenSource) Refresh(token *azureToken) (*azureToken, error) {
return ts.source.Refresh(token)
}
// refresh outdated token with adal.
func (ts *azureTokenSourceDeviceCode) Refresh(token *azureToken) (*azureToken, error) {
env, err := azure.EnvironmentFromName(token.environment)
if err != nil {
return nil, err
}
var oauthConfig *adal.OAuthConfig
if ts.configMode == configModeOmitSPNPrefix {
oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, token.tenantID, nil)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration without api-version for token refresh: %v", err)
}
} else {
oauthConfig, err = adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, token.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err)
}
}
callback := func(t adal.Token) error {
return nil
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
*oauthConfig,
token.clientID,
token.apiserverID,
token.token,
callback)
if err != nil {
return nil, fmt.Errorf("creating new service principal for token refresh: %v", err)
}
if err := spt.Refresh(); err != nil {
// Caller expects IsTokenRefreshError(err) to trigger prompt.
return nil, fmt.Errorf("refreshing token: %w", err)
}
return &azureToken{
token: spt.Token(),
environment: token.environment,
clientID: token.clientID,
tenantID: token.tenantID,
apiserverID: token.apiserverID,
}, nil
}
type azureTokenSourceDeviceCode struct {
environment azure.Environment
clientID string
tenantID string
apiserverID string
configMode configMode
}
func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID string, tenantID string, apiserverID string, configMode configMode) (tokenSource, error) {
if clientID == "" {
return nil, errors.New("client-id is empty")
}
if tenantID == "" {
return nil, errors.New("tenant-id is empty")
}
if apiserverID == "" {
return nil, errors.New("apiserver-id is empty")
}
return &azureTokenSourceDeviceCode{
environment: environment,
clientID: clientID,
tenantID: tenantID,
apiserverID: apiserverID,
configMode: configMode,
}, nil
}
func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) {
var (
oauthConfig *adal.OAuthConfig
err error
)
if ts.configMode == configModeOmitSPNPrefix {
oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(ts.environment.ActiveDirectoryEndpoint, ts.tenantID, nil)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration without api-version for device code authentication: %v", err)
}
} else {
oauthConfig, err = adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err)
}
}
client := &autorest.Client{}
deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID)
if err != nil {
return nil, fmt.Errorf("initiating the device code authentication: %v", err)
}
_, err = fmt.Fprintln(os.Stderr, *deviceCode.Message)
if err != nil {
return nil, fmt.Errorf("prompting the device code message: %v", err)
}
token, err := adal.WaitForUserCompletion(client, deviceCode)
if err != nil {
return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err)
}
return &azureToken{
token: *token,
environment: ts.environment.Name,
clientID: ts.clientID,
tenantID: ts.tenantID,
apiserverID: ts.apiserverID,
}, nil
}

View File

@@ -0,0 +1,36 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"errors"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
func init() {
if err := rest.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil {
klog.Fatalf("Failed to register azure auth plugin: %v", err)
}
}
func newAzureAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
return nil, errors.New(`The azure auth plugin has been removed.
Please use the https://github.com/Azure/kubelogin kubectl/client-go credential plugin instead.
See https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins for further details`)
}
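// Hedged sketch of the replacement path: authentication now flows through an
// exec credential plugin such as kubelogin, configured in kubeconfig or on
// rest.Config. clientcmdapi is assumed to alias
// k8s.io/client-go/tools/clientcmd/api, and the command and arguments below
// are illustrative, not authoritative.
//
//	cfg.ExecProvider = &clientcmdapi.ExecConfig{
//		APIVersion: "client.authentication.k8s.io/v1beta1",
//		Command:    "kubelogin",
//		Args:       []string{"get-token", "--login", "devicecode"},
//	}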

View File

@@ -1,8 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- cjcullen
reviewers:
- cjcullen
emeritus_approvers:
- jlowdermilk

View File

@@ -1,390 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os/exec"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/yaml"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/jsonpath"
"k8s.io/klog/v2"
)
func init() {
if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
klog.Fatalf("Failed to register gcp auth plugin: %v", err)
}
}
var (
// Stubbable for testing
execCommand = exec.Command
// defaultScopes:
// - cloud-platform is the base scope to authenticate to GCP.
// - userinfo.email is used to authenticate to GKE APIs with gserviceaccount
// email instead of numeric uniqueID.
defaultScopes = []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/userinfo.email"}
)
// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide
// tokens for kubectl to authenticate itself to the apiserver. A sample json config
// is provided below with all recognized options described.
//
// {
// 'auth-provider': {
// # Required
// "name": "gcp",
//
// 'config': {
// # Authentication options
// # These options are used while getting a token.
//
// # comma-separated list of GCP API scopes. default value of this field
// # is "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email".
// # to override the API scopes, specify this field explicitly.
// "scopes": "https://www.googleapis.com/auth/cloud-platform"
//
// # Caching options
//
// # Raw string data representing cached access token.
// "access-token": "ya29.CjWdA4GiBPTt",
// # RFC3339Nano expiration timestamp for cached access token.
// "expiry": "2016-10-31 22:31:9.123",
//
// # Command execution options
// # These options direct the plugin to execute a specified command and parse
// # token and expiry time from the output of the command.
//
// # Command to execute for access token. Command output will be parsed as JSON.
// # If "cmd-args" is not present, this value will be split on whitespace, with
// # the first element interpreted as the command, remaining elements as args.
// "cmd-path": "/usr/bin/gcloud",
//
// # Arguments to pass to command to execute for access token.
// "cmd-args": "config config-helper --output=json"
//
// # JSONPath to the string field that represents the access token in
// # command output. If omitted, defaults to "{.access_token}".
// "token-key": "{.credential.access_token}",
//
// # JSONPath to the string field that represents expiration timestamp
// # of the access token in the command output. If omitted, defaults to
// # "{.token_expiry}"
// "expiry-key": ""{.credential.token_expiry}",
//
// # golang reference time in the format that the expiration timestamp uses.
// # If omitted, defaults to time.RFC3339Nano
// "time-fmt": "2006-01-02 15:04:05.999999999"
// }
// }
// }
//
type gcpAuthProvider struct {
tokenSource oauth2.TokenSource
persister restclient.AuthProviderConfigPersister
}
var warnOnce sync.Once
func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
warnOnce.Do(func() {
klog.Warningf(`WARNING: the gcp auth plugin is deprecated in v1.22+, unavailable in v1.25+; use gcloud instead.
To learn more, consult https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke`)
})
ts, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig)
if err != nil {
return nil, err
}
cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig)
if err != nil {
return nil, err
}
return &gcpAuthProvider{cts, persister}, nil
}
func isCmdTokenSource(gcpConfig map[string]string) bool {
_, ok := gcpConfig["cmd-path"]
return ok
}
func tokenSource(isCmd bool, gcpConfig map[string]string) (oauth2.TokenSource, error) {
// Command-based token source
if isCmd {
cmd := gcpConfig["cmd-path"]
if len(cmd) == 0 {
return nil, fmt.Errorf("missing access token cmd")
}
if gcpConfig["scopes"] != "" {
return nil, fmt.Errorf("scopes can only be used when kubectl is using a gcp service account key")
}
var args []string
if cmdArgs, ok := gcpConfig["cmd-args"]; ok {
args = strings.Fields(cmdArgs)
} else {
fields := strings.Fields(cmd)
cmd = fields[0]
args = fields[1:]
}
return newCmdTokenSource(cmd, args, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]), nil
}
// Google Application Credentials-based token source
scopes := parseScopes(gcpConfig)
ts, err := google.DefaultTokenSource(context.Background(), scopes...)
if err != nil {
return nil, fmt.Errorf("cannot construct google default token source: %v", err)
}
return ts, nil
}
// parseScopes constructs a list of scopes that should be included in token source
// from the config map.
func parseScopes(gcpConfig map[string]string) []string {
scopes, ok := gcpConfig["scopes"]
if !ok {
return defaultScopes
}
if scopes == "" {
return []string{}
}
return strings.Split(gcpConfig["scopes"], ",")
}
func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
var resetCache map[string]string
if cts, ok := g.tokenSource.(*cachedTokenSource); ok {
resetCache = cts.baseCache()
} else {
resetCache = make(map[string]string)
}
return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister, resetCache}
}
func (g *gcpAuthProvider) Login() error { return nil }
type cachedTokenSource struct {
lk sync.Mutex
source oauth2.TokenSource
accessToken string `datapolicy:"token"`
expiry time.Time
persister restclient.AuthProviderConfigPersister
cache map[string]string
}
func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) {
var expiryTime time.Time
if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
expiryTime = parsedTime
}
if cache == nil {
cache = make(map[string]string)
}
return &cachedTokenSource{
source: ts,
accessToken: accessToken,
expiry: expiryTime,
persister: persister,
cache: cache,
}, nil
}
func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
tok := t.cachedToken()
if tok.Valid() && !tok.Expiry.IsZero() {
return tok, nil
}
tok, err := t.source.Token()
if err != nil {
return nil, err
}
cache := t.update(tok)
if t.persister != nil {
if err := t.persister.Persist(cache); err != nil {
klog.V(4).Infof("Failed to persist token: %v", err)
}
}
return tok, nil
}
func (t *cachedTokenSource) cachedToken() *oauth2.Token {
t.lk.Lock()
defer t.lk.Unlock()
return &oauth2.Token{
AccessToken: t.accessToken,
TokenType: "Bearer",
Expiry: t.expiry,
}
}
func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
t.lk.Lock()
defer t.lk.Unlock()
t.accessToken = tok.AccessToken
t.expiry = tok.Expiry
ret := map[string]string{}
for k, v := range t.cache {
ret[k] = v
}
ret["access-token"] = t.accessToken
ret["expiry"] = t.expiry.Format(time.RFC3339Nano)
return ret
}
// baseCache is the base configuration value for this TokenSource, without any cached ephemeral tokens.
func (t *cachedTokenSource) baseCache() map[string]string {
t.lk.Lock()
defer t.lk.Unlock()
ret := map[string]string{}
for k, v := range t.cache {
ret[k] = v
}
delete(ret, "access-token")
delete(ret, "expiry")
return ret
}
type commandTokenSource struct {
cmd string
args []string
tokenKey string `datapolicy:"token"`
expiryKey string `datapolicy:"secret-key"`
timeFmt string
}
func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource {
if len(timeFmt) == 0 {
timeFmt = time.RFC3339Nano
}
if len(tokenKey) == 0 {
tokenKey = "{.access_token}"
}
if len(expiryKey) == 0 {
expiryKey = "{.token_expiry}"
}
return &commandTokenSource{
cmd: cmd,
args: args,
tokenKey: tokenKey,
expiryKey: expiryKey,
timeFmt: timeFmt,
}
}
func (c *commandTokenSource) Token() (*oauth2.Token, error) {
fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ")
cmd := execCommand(c.cmd, c.args...)
var stderr bytes.Buffer
cmd.Stderr = &stderr
output, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, string(stderr.Bytes()))
}
token, err := c.parseTokenCmdOutput(output)
if err != nil {
return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err)
}
return token, nil
}
func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) {
output, err := yaml.ToJSON(output)
if err != nil {
return nil, err
}
var data interface{}
if err := json.Unmarshal(output, &data); err != nil {
return nil, err
}
accessToken, err := parseJSONPath(data, "token-key", c.tokenKey)
if err != nil {
return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(output), err)
}
expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey)
if err != nil {
return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(output), err)
}
var expiry time.Time
if t, err := time.Parse(c.timeFmt, expiryStr); err != nil {
klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err)
} else {
expiry = t
}
return &oauth2.Token{
AccessToken: accessToken,
TokenType: "Bearer",
Expiry: expiry,
}, nil
}
func parseJSONPath(input interface{}, name, template string) (string, error) {
j := jsonpath.New(name)
buf := new(bytes.Buffer)
if err := j.Parse(template); err != nil {
return "", err
}
if err := j.Execute(buf, input); err != nil {
return "", err
}
return buf.String(), nil
}
type conditionalTransport struct {
oauthTransport *oauth2.Transport
persister restclient.AuthProviderConfigPersister
resetCache map[string]string
}
var _ net.RoundTripperWrapper = &conditionalTransport{}
func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get("Authorization")) != 0 {
return t.oauthTransport.Base.RoundTrip(req)
}
res, err := t.oauthTransport.RoundTrip(req)
if err != nil {
return nil, err
}
if res.StatusCode == 401 {
klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster")
t.persister.Persist(t.resetCache)
}
return res, nil
}
func (t *conditionalTransport) WrappedRoundTripper() http.RoundTripper { return t.oauthTransport.Base }

View File

@@ -0,0 +1,36 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"errors"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
func init() {
if err := rest.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
klog.Fatalf("Failed to register gcp auth plugin: %v", err)
}
}
func newGCPAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
return nil, errors.New(`The gcp auth plugin has been removed.
Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead.
See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details`)
}
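
For orientation, a hedged sketch of the Factory/AuthProvider contract that RegisterAuthProviderPlugin expects; the "example" plugin name, the "token" config key, and the bearer round tripper are invented for illustration, not a real in-tree plugin:

package example

import (
	"net/http"

	"k8s.io/client-go/rest"
)

func init() {
	// Factory signature: cluster address, plugin config map, and a persister
	// the plugin can use to save updated configuration.
	if err := rest.RegisterAuthProviderPlugin("example", newExampleAuthProvider); err != nil {
		panic(err)
	}
}

func newExampleAuthProvider(_ string, cfg map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
	return &exampleProvider{token: cfg["token"]}, nil
}

type exampleProvider struct{ token string }

func (p *exampleProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
	return &bearerRoundTripper{token: p.token, next: rt}
}

func (p *exampleProvider) Login() error { return nil }

type bearerRoundTripper struct {
	token string
	next  http.RoundTripper
}

func (b *bearerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// Only add the header when the caller has not set one already.
	if req.Header.Get("Authorization") == "" {
		req = req.Clone(req.Context())
		req.Header.Set("Authorization", "Bearer "+b.token)
	}
	return b.next.RoundTrip(req)
}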

View File

@@ -52,7 +52,8 @@ type Interface interface {
// ClientContentConfig controls how RESTClient communicates with the server.
//
// TODO: ContentConfig will be updated to accept a Negotiator instead of a
// NegotiatedSerializer and NegotiatedSerializer will be removed.
//
// NegotiatedSerializer and NegotiatedSerializer will be removed.
type ClientContentConfig struct {
// AcceptContentTypes specifies the types the client will accept and is optional.
// If not set, ContentType will be used to define the Accept header
@@ -159,13 +160,14 @@ func readExpBackoffConfig() BackoffManager {
// c, err := NewRESTClient(...)
// if err != nil { ... }
// resp, err := c.Verb("GET").
// Path("pods").
// SelectorParam("labels", "area=staging").
// Timeout(10*time.Second).
// Do()
//
// Path("pods").
// SelectorParam("labels", "area=staging").
// Timeout(10*time.Second).
// Do()
//
// if err != nil { ... }
// list, ok := resp.(*api.PodList)
//
func (c *RESTClient) Verb(verb string) *Request {
return NewRequest(c).Verb(verb)
}

View File

@@ -21,7 +21,6 @@ import (
"net/http"
"net/url"
"k8s.io/client-go/pkg/apis/clientauthentication"
clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication"
)
@@ -50,7 +49,7 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro
}
}
return &clientauthentication.Cluster{
return &clientauthenticationapi.Cluster{
Server: config.Host,
TLSServerName: config.ServerName,
InsecureSkipTLSVerify: config.Insecure,
@@ -63,7 +62,7 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro
// ExecClusterToConfig creates a Config with the corresponding fields from the provided
// clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have
// any authentication-related fields set).
func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) {
func ExecClusterToConfig(cluster *clientauthenticationapi.Cluster) (*Config, error) {
var proxy func(*http.Request) (*url.URL, error)
if cluster.ProxyURL != "" {
proxyURL, err := url.Parse(cluster.ProxyURL)

View File

@@ -36,9 +36,10 @@ type AuthProvider interface {
}
// Factory generates an AuthProvider plugin.
// clusterAddress is the address of the current cluster.
// config is the initial configuration for this plugin.
// persister allows the plugin to save updated configuration.
//
// clusterAddress is the address of the current cluster.
// config is the initial configuration for this plugin.
// persister allows the plugin to save updated configuration.
type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
// AuthProviderConfigPersister allows a plugin to persist configuration info

View File

@@ -710,7 +710,7 @@ func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, er
if err != nil {
metrics.RequestResult.Increment(ctx, "<error>", req.verb, url)
} else {
//Metrics for failure codes
// Metrics for failure codes
metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url)
}
}
@@ -823,7 +823,7 @@ func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
// fn at most once. It will return an error if a problem occurred prior to connecting to the
// server - the provided function is responsible for handling server errors.
func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error {
//Metrics for total request latency
// Metrics for total request latency
start := time.Now()
defer func() {
metrics.RequestLatency.Observe(ctx, r.verb, *r.URL(), time.Since(start))
@@ -892,7 +892,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
done := func() bool {
defer readAndCloseResponseBody(resp)
// if the the server returns an error in err, the response will be nil.
// if the server returns an error in err, the response will be nil.
f := func(req *http.Request, resp *http.Response) {
if resp == nil {
return
@@ -917,8 +917,8 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
// processing.
//
// Error type:
// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// * http.Client.Do errors are returned directly.
// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// - http.Client.Do errors are returned directly.
func (r *Request) Do(ctx context.Context) Result {
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
@@ -1085,15 +1085,15 @@ const maxUnstructuredResponseTextBytes = 2048
// unexpected responses. The rough structure is:
//
// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
// - this is the happy path
// - when you get this output, trust what the server sends
// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
// generate a reasonable facsimile of the original failure.
// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
// initial contact, the presence of mismatched body contents from posted content types
// - Give these a separate distinct error type and capture as much as possible of the original message
// - this is the happy path
// - when you get this output, trust what the server sends
// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
// generate a reasonable facsimile of the original failure.
// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
// initial contact, the presence of mismatched body contents from posted content types
// - Give these a separate distinct error type and capture as much as possible of the original message
//
// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {

View File

@@ -40,9 +40,9 @@ var (
// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered.
// By default, warnings are logged. Several built-in implementations are provided:
// - NoWarnings suppresses warnings.
// - WarningLogger logs warnings.
// - NewWarningWriter() outputs warnings to the provided writer.
// - NoWarnings suppresses warnings.
// - WarningLogger logs warnings.
// - NewWarningWriter() outputs warnings to the provided writer.
func SetDefaultWarningHandler(l WarningHandler) {
defaultWarningHandlerLock.Lock()
defer defaultWarningHandlerLock.Unlock()
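
A short sketch of selecting one of the built-in handlers listed in the comment above; writing deduplicated warnings to stderr is just one plausible choice:

package main

import (
	"os"

	"k8s.io/client-go/rest"
)

func main() {
	// Route all client-go warning headers to stderr, suppressing duplicates.
	rest.SetDefaultWarningHandler(
		rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{Deduplicate: true}),
	)

	// Alternatively, silence warnings entirely:
	// rest.SetDefaultWarningHandler(rest.NoWarnings{})
}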

View File

@@ -204,7 +204,9 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
if r.retryAfter == nil {
// we do a backoff sleep before the first attempt is made,
// (preserving current behavior).
request.backoff.Sleep(request.backoff.CalculateBackoff(url))
if request.backoff != nil {
request.backoff.Sleep(request.backoff.CalculateBackoff(url))
}
return nil
}
@@ -220,14 +222,13 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
}
}
// if we are here, we have made attempt(s) al least once before.
// if we are here, we have made attempt(s) at least once before.
if request.backoff != nil {
// TODO(tkashem) with default set to use exponential backoff
// we can merge these two sleeps:
// BackOffManager.Sleep(max(backoffManager.CalculateBackoff(), retryAfter))
// see https://github.com/kubernetes/kubernetes/issues/108302
request.backoff.Sleep(r.retryAfter.Wait)
request.backoff.Sleep(request.backoff.CalculateBackoff(url))
delay := request.backoff.CalculateBackoff(url)
if r.retryAfter.Wait > delay {
delay = r.retryAfter.Wait
}
request.backoff.Sleep(delay)
}
// We are retrying the request that we already send to
@@ -349,8 +350,12 @@ func readAndCloseResponseBody(resp *http.Response) {
}
func retryAfterResponse() *http.Response {
return retryAfterResponseWithDelay("1")
}
func retryAfterResponseWithDelay(delay string) *http.Response {
return &http.Response{
StatusCode: http.StatusInternalServerError,
Header: http.Header{"Retry-After": []string{"1"}},
Header: http.Header{"Retry-After": []string{delay}},
}
}

View File

@@ -102,10 +102,20 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc {
if action.GetSubresource() == "" {
err = tracker.Create(gvr, action.GetObject(), ns)
} else {
// TODO: Currently we're handling subresource creation as an update
// on the enclosing resource. This works for some subresources but
// might not be generic enough.
err = tracker.Update(gvr, action.GetObject(), ns)
oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName())
if getOldObjErr != nil {
return true, nil, getOldObjErr
}
// Only treat the subresource request as an update when the stored object and the submitted object have the same type.
if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) {
// TODO: Currently we're handling subresource creation as an update
// on the enclosing resource. This works for some subresources but
// might not be generic enough.
err = tracker.Update(gvr, action.GetObject(), ns)
} else {
// If the types differ, return the submitted object as-is and do not persist it in the tracker.
return true, action.GetObject(), nil
}
}
if err != nil {
return true, nil, err
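
A minimal sketch of the scenario this change appears to target, assuming the fake clientset routes pod eviction through ObjectReaction as a subresource create; because the Eviction's Go type differs from the enclosing Pod's, the submitted object is echoed back and the tracked Pod is left untouched:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}}
	client := fake.NewSimpleClientset(pod)

	// The eviction object has a different type than the enclosing Pod,
	// so the tracker keeps the Pod instead of overwriting it.
	err := client.CoreV1().Pods("default").EvictV1(context.TODO(),
		&policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}})
	fmt.Println("evict err:", err)

	got, err := client.CoreV1().Pods("default").Get(context.TODO(), "p", metav1.GetOptions{})
	fmt.Println("pod still tracked:", got != nil, err)
}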

View File

@@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@@ -1,94 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions indirect and printableValue
//are exported as public functions.
package template
import (
"fmt"
"reflect"
)
var Indirect = indirect
var PrintableValue = printableValue
var (
errorType = reflect.TypeOf((*error)(nil)).Elem()
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
if v.IsNil() {
return v, true
}
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
break
}
}
return v, false
}
// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (interface{}, bool) {
if v.Kind() == reflect.Ptr {
v, _ = indirect(v) // fmt.Fprint handles nil.
}
if !v.IsValid() {
return "<no value>", true
}
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
v = v.Addr()
} else {
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil, false
}
}
}
return v.Interface(), true
}
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
switch typ.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return true
}
return false
}
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value.
func isTrue(val reflect.Value) (truth, ok bool) {
if !val.IsValid() {
// Something like var x interface{}, never set. It's a form of nil.
return false, true
}
switch val.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
truth = val.Len() > 0
case reflect.Bool:
truth = val.Bool()
case reflect.Complex64, reflect.Complex128:
truth = val.Complex() != 0
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
truth = !val.IsNil()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
truth = val.Int() != 0
case reflect.Float32, reflect.Float64:
truth = val.Float() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
truth = val.Uint() != 0
case reflect.Struct:
truth = true // Struct values are always true.
default:
return
}
return truth, true
}

View File

@@ -1,599 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions eq, ge, gt, le, lt, and ne
//are exported as public functions.
package template
import (
"bytes"
"errors"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var Equal = eq
var GreaterEqual = ge
var Greater = gt
var LessEqual = le
var Less = lt
var NotEqual = ne
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}
var builtins = FuncMap{
"and": and,
"call": call,
"html": HTMLEscaper,
"index": index,
"js": JSEscaper,
"len": length,
"not": not,
"or": or,
"print": fmt.Sprint,
"printf": fmt.Sprintf,
"println": fmt.Sprintln,
"urlquery": URLQueryEscaper,
// Comparisons
"eq": eq, // ==
"ge": ge, // >=
"gt": gt, // >
"le": le, // <=
"lt": lt, // <
"ne": ne, // !=
}
var builtinFuncs = createValueFuncs(builtins)
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
m := make(map[string]reflect.Value)
addValueFuncs(m, funcMap)
return m
}
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
for name, fn := range in {
v := reflect.ValueOf(fn)
if v.Kind() != reflect.Func {
panic("value for " + name + " not a function")
}
if !goodFunc(v.Type()) {
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
}
out[name] = v
}
}
// AddFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
for name, fn := range in {
out[name] = fn
}
}
// goodFunc checks that the function or method has the right result signature.
func goodFunc(typ reflect.Type) bool {
// We allow functions with 1 result or 2 results where the second is an error.
switch {
case typ.NumOut() == 1:
return true
case typ.NumOut() == 2 && typ.Out(1) == errorType:
return true
}
return false
}
// findFunction looks for a function in the template, and global map.
func findFunction(name string) (reflect.Value, bool) {
if fn := builtinFuncs[name]; fn.IsValid() {
return fn, true
}
return reflect.Value{}, false
}
// Indexing.
// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
v := reflect.ValueOf(item)
for _, i := range indices {
index := reflect.ValueOf(i)
var isNil bool
if v, isNil = indirect(v); isNil {
return nil, fmt.Errorf("index of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.String:
var x int64
switch index.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x = index.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x = int64(index.Uint())
default:
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
}
if x < 0 || x >= int64(v.Len()) {
return nil, fmt.Errorf("index out of range: %d", x)
}
v = v.Index(int(x))
case reflect.Map:
if !index.IsValid() {
index = reflect.Zero(v.Type().Key())
}
if !index.Type().AssignableTo(v.Type().Key()) {
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
}
if x := v.MapIndex(index); x.IsValid() {
v = x
} else {
v = reflect.Zero(v.Type().Elem())
}
default:
return nil, fmt.Errorf("can't index item of type %s", v.Type())
}
}
return v.Interface(), nil
}
// Length
// length returns the length of the item, with an error if it has no defined length.
func length(item interface{}) (int, error) {
v, isNil := indirect(reflect.ValueOf(item))
if isNil {
return 0, fmt.Errorf("len of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
return v.Len(), nil
}
return 0, fmt.Errorf("len of type %s", v.Type())
}
// Function invocation
// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
v := reflect.ValueOf(fn)
typ := v.Type()
if typ.Kind() != reflect.Func {
return nil, fmt.Errorf("non-function of type %s", typ)
}
if !goodFunc(typ) {
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
}
numIn := typ.NumIn()
var dddType reflect.Type
if typ.IsVariadic() {
if len(args) < numIn-1 {
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
}
dddType = typ.In(numIn - 1).Elem()
} else {
if len(args) != numIn {
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
}
}
argv := make([]reflect.Value, len(args))
for i, arg := range args {
value := reflect.ValueOf(arg)
// Compute the expected type. Clumsy because of variadics.
var argType reflect.Type
if !typ.IsVariadic() || i < numIn-1 {
argType = typ.In(i)
} else {
argType = dddType
}
if !value.IsValid() && canBeNil(argType) {
value = reflect.Zero(argType)
}
if !value.Type().AssignableTo(argType) {
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
}
argv[i] = value
}
result := v.Call(argv)
if len(result) == 2 && !result[1].IsNil() {
return result[0].Interface(), result[1].Interface().(error)
}
return result[0].Interface(), nil
}
// Boolean logic.
func truth(a interface{}) bool {
t, _ := isTrue(reflect.ValueOf(a))
return t
}
// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
if !truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if !truth(arg0) {
break
}
}
return arg0
}
// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
if truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if truth(arg0) {
break
}
}
return arg0
}
// not returns the Boolean negation of its argument.
func not(arg interface{}) (truth bool) {
truth, _ = isTrue(reflect.ValueOf(arg))
return !truth
}
// Comparison.
// TODO: Perhaps allow comparison between signed and unsigned integers.
var (
errBadComparisonType = errors.New("invalid type for comparison")
errBadComparison = errors.New("incompatible types for comparison")
errNoComparison = errors.New("missing argument for comparison")
)
type kind int
const (
invalidKind kind = iota
boolKind
complexKind
intKind
floatKind
integerKind
stringKind
uintKind
)
func basicKind(v reflect.Value) (kind, error) {
switch v.Kind() {
case reflect.Bool:
return boolKind, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intKind, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintKind, nil
case reflect.Float32, reflect.Float64:
return floatKind, nil
case reflect.Complex64, reflect.Complex128:
return complexKind, nil
case reflect.String:
return stringKind, nil
}
return invalidKind, errBadComparisonType
}
// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
if len(arg2) == 0 {
return false, errNoComparison
}
for _, arg := range arg2 {
v2 := reflect.ValueOf(arg)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind:
truth = v1.Bool() == v2.Bool()
case complexKind:
truth = v1.Complex() == v2.Complex()
case floatKind:
truth = v1.Float() == v2.Float()
case intKind:
truth = v1.Int() == v2.Int()
case stringKind:
truth = v1.String() == v2.String()
case uintKind:
truth = v1.Uint() == v2.Uint()
default:
panic("invalid kind")
}
}
if truth {
return true, nil
}
}
return false, nil
}
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
// != is the inverse of ==.
equal, err := eq(arg1, arg2)
return !equal, err
}
// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
v2 := reflect.ValueOf(arg2)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind, complexKind:
return false, errBadComparisonType
case floatKind:
truth = v1.Float() < v2.Float()
case intKind:
truth = v1.Int() < v2.Int()
case stringKind:
truth = v1.String() < v2.String()
case uintKind:
truth = v1.Uint() < v2.Uint()
default:
panic("invalid kind")
}
}
return truth, nil
}
// le evaluates the comparison <= b.
func le(arg1, arg2 interface{}) (bool, error) {
// <= is < or ==.
lessThan, err := lt(arg1, arg2)
if lessThan || err != nil {
return lessThan, err
}
return eq(arg1, arg2)
}
// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
// > is the inverse of <=.
lessOrEqual, err := le(arg1, arg2)
if err != nil {
return false, err
}
return !lessOrEqual, nil
}
// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
// >= is the inverse of <.
lessThan, err := lt(arg1, arg2)
if err != nil {
return false, err
}
return !lessThan, nil
}
// HTML escaping.
var (
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp = []byte("&amp;")
htmlLt = []byte("&lt;")
htmlGt = []byte("&gt;")
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
last := 0
for i, c := range b {
var html []byte
switch c {
case '"':
html = htmlQuot
case '\'':
html = htmlApos
case '&':
html = htmlAmp
case '<':
html = htmlLt
case '>':
html = htmlGt
default:
continue
}
w.Write(b[last:i])
w.Write(html)
last = i + 1
}
w.Write(b[last:])
}
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexAny(s, `'"&<>`) < 0 {
return s
}
var b bytes.Buffer
HTMLEscape(&b, []byte(s))
return b.String()
}
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
return HTMLEscapeString(evalArgs(args))
}
// JavaScript escaping.
var (
jsLowUni = []byte(`\u00`)
hex = []byte("0123456789ABCDEF")
jsBackslash = []byte(`\\`)
jsApos = []byte(`\'`)
jsQuot = []byte(`\"`)
jsLt = []byte(`\x3C`)
jsGt = []byte(`\x3E`)
)
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
last := 0
for i := 0; i < len(b); i++ {
c := b[i]
if !jsIsSpecial(rune(c)) {
// fast path: nothing to do
continue
}
w.Write(b[last:i])
if c < utf8.RuneSelf {
// Quotes, slashes and angle brackets get quoted.
// Control characters get written as \u00XX.
switch c {
case '\\':
w.Write(jsBackslash)
case '\'':
w.Write(jsApos)
case '"':
w.Write(jsQuot)
case '<':
w.Write(jsLt)
case '>':
w.Write(jsGt)
default:
w.Write(jsLowUni)
t, b := c>>4, c&0x0f
w.Write(hex[t : t+1])
w.Write(hex[b : b+1])
}
} else {
// Unicode rune.
r, size := utf8.DecodeRune(b[i:])
if unicode.IsPrint(r) {
w.Write(b[i : i+size])
} else {
fmt.Fprintf(w, "\\u%04X", r)
}
i += size - 1
}
last = i + 1
}
w.Write(b[last:])
}
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexFunc(s, jsIsSpecial) < 0 {
return s
}
var b bytes.Buffer
JSEscape(&b, []byte(s))
return b.String()
}
func jsIsSpecial(r rune) bool {
switch r {
case '\\', '\'', '"', '<', '>':
return true
}
return r < ' ' || utf8.RuneSelf <= r
}
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
return JSEscapeString(evalArgs(args))
}
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
return url.QueryEscape(evalArgs(args))
}
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
// fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
ok := false
var s string
// Fast path for simple common case.
if len(args) == 1 {
s, ok = args[0].(string)
}
if !ok {
for i, arg := range args {
a, ok := printableValue(reflect.ValueOf(arg))
if ok {
args[i] = a
} // else left fmt do its thing
}
s = fmt.Sprint(args...)
}
return s
}

View File

@@ -45,20 +45,20 @@ client.Client from an authcfg.Info.
Example:
import (
"pkg/client"
"pkg/client/auth"
)
import (
"pkg/client"
"pkg/client/auth"
)
info, err := auth.LoadFromFile(filename)
if err != nil {
// handle error
}
clientConfig = client.Config{}
clientConfig.Host = "example.com:4901"
clientConfig = info.MergeWithConfig()
client := client.New(clientConfig)
client.Pods(ns).List()
info, err := auth.LoadFromFile(filename)
if err != nil {
// handle error
}
clientConfig = client.Config{}
clientConfig.Host = "example.com:4901"
clientConfig = info.MergeWithConfig()
client := client.New(clientConfig)
client.Pods(ns).List()
*/
package auth

View File

@@ -199,17 +199,17 @@ func (c *controller) processLoop() {
// can't return an error. The handlers MUST NOT modify the objects
// received; this concerns not only the top level of structure but all
// the data structures reachable from it.
// * OnAdd is called when an object is added.
// * OnUpdate is called when an object is modified. Note that oldObj is the
// last known state of the object-- it is possible that several changes
// were combined together, so you can't use this to see every single
// change. OnUpdate is also called when a re-list happens, and it will
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// * OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
// - OnAdd is called when an object is added.
// - OnUpdate is called when an object is modified. Note that oldObj is the
// last known state of the object-- it is possible that several changes
// were combined together, so you can't use this to see every single
// change. OnUpdate is also called when a re-list happens, and it will
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// - OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
OnAdd(obj interface{})
OnUpdate(oldObj, newObj interface{})
@@ -305,15 +305,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
//
// - lw is list and watch functions for the source of the resource you want to
// be informed of.
// - objType is an object of the type that you expect to receive.
// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// - h is the object you want notifications sent to.
func NewInformer(
lw ListerWatcher,
objType runtime.Object,
@@ -332,16 +331,15 @@ func NewInformer(
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
// * indexers is the indexer for the received object type.
//
// - lw is list and watch functions for the source of the resource you want to
// be informed of.
// - objType is an object of the type that you expect to receive.
// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// - h is the object you want notifications sent to.
// - indexers is the indexer for the received object type.
func NewIndexerInformer(
lw ListerWatcher,
objType runtime.Object,
@@ -454,16 +452,15 @@ func processDeltas(
// providing event notifications.
//
// Parameters
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
// * clientState is the store you want to populate
//
// - lw is list and watch functions for the source of the resource you want to
// be informed of.
// - objType is an object of the type that you expect to receive.
// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// - h is the object you want notifications sent to.
// - clientState is the store you want to populate
func newInformer(
lw ListerWatcher,
objType runtime.Object,

View File

@@ -74,11 +74,11 @@ type DeltaFIFOOptions struct {
// the Pop() method.
//
// DeltaFIFO solves this use case:
// * You want to process every object change (delta) at most once.
// * When you process an object, you want to see everything
// that's happened to it since you last processed it.
// * You want to process the deletion of some of the objects.
// * You might want to periodically reprocess objects.
// - You want to process every object change (delta) at most once.
// - When you process an object, you want to see everything
// that's happened to it since you last processed it.
// - You want to process the deletion of some of the objects.
// - You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but they
@@ -179,21 +179,21 @@ type Deltas []Delta
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
//
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
//
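
As a rough usage sketch (not part of this diff), a DeltaFIFO built via DeltaFIFOOptions and drained with Pop, which delivers every delta recorded for a key since it was last processed; the pod object is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction: cache.MetaNamespaceKeyFunc,
	})

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}}
	_ = fifo.Add(pod)
	_ = fifo.Update(pod)

	// Pop hands back the accumulated Deltas for one key.
	_, _ = fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Println(d.Type, d.Object.(*corev1.Pod).Name)
		}
		return nil
	})
}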

View File

@@ -25,13 +25,14 @@ import (
)
// ExpirationCache implements the store interface
// 1. All entries are automatically time stamped on insert
// a. The key is computed based off the original item/keyFunc
// b. The value inserted under that key is the timestamped item
// 2. Expiration happens lazily on read based on the expiration policy
// a. No item can be inserted into the store while we're expiring
// *any* item in the cache.
// 3. Time-stamps are stripped off unexpired entries before return
// 1. All entries are automatically time stamped on insert
// a. The key is computed based off the original item/keyFunc
// b. The value inserted under that key is the timestamped item
// 2. Expiration happens lazily on read based on the expiration policy
// a. No item can be inserted into the store while we're expiring
// *any* item in the cache.
// 3. Time-stamps are stripped off unexpired entries before return
//
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.

View File

@@ -103,10 +103,11 @@ func Pop(queue Queue) interface{} {
// recent version will be processed. This can't be done with a channel
//
// FIFO solves this use case:
// * You want to process every object (exactly) once.
// * You want to process the most recent version of the object when you process it.
// * You do not want to process deleted objects, they should be removed from the queue.
// * You do not want to periodically reprocess objects.
// - You want to process every object (exactly) once.
// - You want to process the most recent version of the object when you process it.
// - You do not want to process deleted objects, they should be removed from the queue.
// - You do not want to periodically reprocess objects.
//
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
lock sync.RWMutex

View File

@@ -28,10 +28,10 @@ import (
// Delete).
//
// There are three kinds of strings here:
// 1. a storage key, as defined in the Store interface,
// 2. a name of an index, and
// 3. an "indexed value", which is produced by an IndexFunc and
// can be a field value or any other string computed from the object.
// 1. a storage key, as defined in the Store interface,
// 2. a name of an index, and
// 3. an "indexed value", which is produced by an IndexFunc and
// can be a field value or any other string computed from the object.
type Indexer interface {
Store
// Index returns the stored objects whose set of indexed values
@@ -47,7 +47,7 @@ type Indexer interface {
// ByIndex returns the stored objects whose set of indexed values
// for the named index includes the given indexed value
ByIndex(indexName, indexedValue string) ([]interface{}, error)
// GetIndexer return the indexers
// GetIndexers returns the indexers
GetIndexers() Indexers
// AddIndexers adds more indexers to this store. If you call this after you already have data

View File

@@ -71,6 +71,8 @@ type Reflector struct {
backoffManager wait.BackoffManager
// initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
initConnBackoffManager wait.BackoffManager
// MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
MaxInternalErrorRetryDuration time.Duration
resyncPeriod time.Duration
// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
@@ -253,112 +255,9 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
// It returns error if ListAndWatch didn't even try to initialize watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
var resourceVersion string
options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
if err := func() error {
initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
defer initTrace.LogIfLong(10 * time.Second)
var list runtime.Object
var paginatedResult bool
var err error
listCh := make(chan struct{}, 1)
panicCh := make(chan interface{}, 1)
go func() {
defer func() {
if r := recover(); r != nil {
panicCh <- r
}
}()
// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
// list request will return the full response.
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return r.listerWatcher.List(opts)
}))
switch {
case r.WatchListPageSize != 0:
pager.PageSize = r.WatchListPageSize
case r.paginatedResult:
// We got a paginated result initially. Assume this resource and server honor
// paging requests (i.e. watch cache is probably disabled) and leave the default
// pager size set.
case options.ResourceVersion != "" && options.ResourceVersion != "0":
// User didn't explicitly request pagination.
//
// With ResourceVersion != "", we have a possibility to list from watch cache,
// but we do that (for ResourceVersion != "0") only if Limit is unset.
// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
// switch off pagination to force listing from watch cache (if enabled).
// With the existing semantic of RV (result is at least as fresh as provided RV),
// this is correct and doesn't lead to going back in time.
//
// We also don't turn off pagination for ResourceVersion="0", since watch cache
// is ignoring Limit in that case anyway, and if watch cache is not enabled
// we don't introduce regression.
pager.PageSize = 0
}
list, paginatedResult, err = pager.List(context.Background(), options)
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, the full list might fail because the
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
}()
select {
case <-stopCh:
return nil
case r := <-panicCh:
panic(r)
case <-listCh:
}
initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
if err != nil {
klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
}
// We check if the list was paginated and if so set the paginatedResult based on that.
// However, we want to do that only for the initial list (which is the only case
// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
// situations we may force listing directly from etcd (by setting ResourceVersion="")
// which will return paginated result, even if watch cache is enabled. However, in
// that case, we still want to prefer sending requests to watch cache if possible.
//
// Paginated result returned for request with ResourceVersion="0" mean that watch
// cache is disabled and there are a lot of objects of a given type. In such case,
// there is no need to prefer listing from watch cache.
if options.ResourceVersion == "0" && paginatedResult {
r.paginatedResult = true
}
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
return fmt.Errorf("unable to understand list result %#v: %v", list, err)
}
resourceVersion = listMetaInterface.GetResourceVersion()
initTrace.Step("Resource version extracted")
items, err := meta.ExtractList(list)
if err != nil {
return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
}
initTrace.Step("Objects extracted")
if err := r.syncWith(items, resourceVersion); err != nil {
return fmt.Errorf("unable to sync list result: %v", err)
}
initTrace.Step("SyncWith done")
r.setLastSyncResourceVersion(resourceVersion)
initTrace.Step("Resource version updated")
return nil
}(); err != nil {
err := r.list(stopCh)
if err != nil {
return err
}
@@ -390,6 +289,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
}()
retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
for {
// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
select {
@@ -399,8 +299,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
options = metav1.ListOptions{
ResourceVersion: resourceVersion,
options := metav1.ListOptions{
ResourceVersion: r.LastSyncResourceVersion(),
// We want to avoid situations of hanging watchers. Stop any watchers that do not
// receive any events within the timeout window.
TimeoutSeconds: &timeoutSeconds,
@@ -426,7 +326,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
return err
}
if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
retry.After(err)
if err != nil {
if err != errorStopRequested {
switch {
case isExpiredError(err):
@@ -438,6 +340,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
<-r.initConnBackoffManager.Backoff().C()
continue
case apierrors.IsInternalError(err) && retry.ShouldRetry():
klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
continue
default:
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
}
@@ -447,6 +352,114 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
}
// list simply lists all items and records a resource version obtained from the server at the moment of the call.
// the resource version can be used for further progress notification (aka. watch).
func (r *Reflector) list(stopCh <-chan struct{}) error {
var resourceVersion string
options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
defer initTrace.LogIfLong(10 * time.Second)
var list runtime.Object
var paginatedResult bool
var err error
listCh := make(chan struct{}, 1)
panicCh := make(chan interface{}, 1)
go func() {
defer func() {
if r := recover(); r != nil {
panicCh <- r
}
}()
// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
// list request will return the full response.
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return r.listerWatcher.List(opts)
}))
switch {
case r.WatchListPageSize != 0:
pager.PageSize = r.WatchListPageSize
case r.paginatedResult:
// We got a paginated result initially. Assume this resource and server honor
// paging requests (i.e. watch cache is probably disabled) and leave the default
// pager size set.
case options.ResourceVersion != "" && options.ResourceVersion != "0":
// User didn't explicitly request pagination.
//
// With ResourceVersion != "", we have a possibility to list from watch cache,
// but we do that (for ResourceVersion != "0") only if Limit is unset.
// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
// switch off pagination to force listing from watch cache (if enabled).
// With the existing semantic of RV (result is at least as fresh as provided RV),
// this is correct and doesn't lead to going back in time.
//
// We also don't turn off pagination for ResourceVersion="0", since watch cache
// is ignoring Limit in that case anyway, and if watch cache is not enabled
// we don't introduce regression.
pager.PageSize = 0
}
list, paginatedResult, err = pager.List(context.Background(), options)
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, the full list might fail because the
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
}()
select {
case <-stopCh:
return nil
case r := <-panicCh:
panic(r)
case <-listCh:
}
initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
if err != nil {
klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
}
// We check if the list was paginated and if so set the paginatedResult based on that.
// However, we want to do that only for the initial list (which is the only case
// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
// situations we may force listing directly from etcd (by setting ResourceVersion="")
// which will return paginated result, even if watch cache is enabled. However, in
// that case, we still want to prefer sending requests to watch cache if possible.
//
// Paginated result returned for request with ResourceVersion="0" mean that watch
// cache is disabled and there are a lot of objects of a given type. In such case,
// there is no need to prefer listing from watch cache.
if options.ResourceVersion == "0" && paginatedResult {
r.paginatedResult = true
}
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
return fmt.Errorf("unable to understand list result %#v: %v", list, err)
}
resourceVersion = listMetaInterface.GetResourceVersion()
initTrace.Step("Resource version extracted")
items, err := meta.ExtractList(list)
if err != nil {
return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
}
initTrace.Step("Objects extracted")
if err := r.syncWith(items, resourceVersion); err != nil {
return fmt.Errorf("unable to sync list result: %v", err)
}
initTrace.Step("SyncWith done")
r.setLastSyncResourceVersion(resourceVersion)
initTrace.Step("Resource version updated")
return nil
}
// syncWith replaces the store's items with the given list.
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
found := make([]interface{}, 0, len(items))
@@ -456,8 +469,19 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
return r.store.Replace(found, resourceVersion)
}
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
// watchHandler watches w and records the latest resource version via setLastSyncResourceVersion
func watchHandler(start time.Time,
w watch.Interface,
store Store,
expectedType reflect.Type,
expectedGVK *schema.GroupVersionKind,
name string,
expectedTypeName string,
setLastSyncResourceVersion func(string),
clock clock.Clock,
errc chan error,
stopCh <-chan struct{},
) error {
eventCount := 0
// Stopping the watcher should be idempotent and if we return from this function there's no way
@@ -478,62 +502,61 @@ loop:
if event.Type == watch.Error {
return apierrors.FromObject(event.Object)
}
if r.expectedType != nil {
if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
if expectedType != nil {
if e, a := expectedType, reflect.TypeOf(event.Object); e != a {
utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a))
continue
}
}
if r.expectedGVK != nil {
if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a))
if expectedGVK != nil {
if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a))
continue
}
}
meta, err := meta.Accessor(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
continue
}
newResourceVersion := meta.GetResourceVersion()
resourceVersion := meta.GetResourceVersion()
switch event.Type {
case watch.Added:
err := r.store.Add(event.Object)
err := store.Add(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err))
}
case watch.Modified:
err := r.store.Update(event.Object)
err := store.Update(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err))
}
case watch.Deleted:
// TODO: Will any consumers need access to the "last known
// state", which is passed in event.Object? If so, may need
// to change this.
err := r.store.Delete(event.Object)
err := store.Delete(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err))
}
case watch.Bookmark:
// A `Bookmark` means watch has synced here, just update the resourceVersion
default:
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
}
*resourceVersion = newResourceVersion
r.setLastSyncResourceVersion(newResourceVersion)
if rvu, ok := r.store.(ResourceVersionUpdater); ok {
rvu.UpdateResourceVersion(newResourceVersion)
setLastSyncResourceVersion(resourceVersion)
if rvu, ok := store.(ResourceVersionUpdater); ok {
rvu.UpdateResourceVersion(resourceVersion)
}
eventCount++
}
}
watchDuration := r.clock.Since(start)
watchDuration := clock.Since(start)
if watchDuration < 1*time.Second && eventCount == 0 {
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
}
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount)
klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount)
return nil
}
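The hunk above extracts watchHandler from the Reflector so it no longer depends on the receiver and can be reused by other callers. A minimal sketch, assuming the Reflector fields referenced in the new parameter list (store, expectedType, expectedGVK, name, expectedTypeName, clock), of how a method-based handler can delegate to the standalone function:

func (r *Reflector) watchHandler(start time.Time, w watch.Interface, errc chan error, stopCh <-chan struct{}) error {
	// Pass the Reflector's own fields and its setLastSyncResourceVersion method
	// as the explicit parameters required by the standalone watchHandler.
	return watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name,
		r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, errc, stopCh)
}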

View File

@@ -0,0 +1,78 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/utils/clock"
"time"
)
type RetryWithDeadline interface {
After(error)
ShouldRetry() bool
}
type retryWithDeadlineImpl struct {
firstErrorTime time.Time
lastErrorTime time.Time
maxRetryDuration time.Duration
minResetPeriod time.Duration
isRetryable func(error) bool
clock clock.Clock
}
func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline {
return &retryWithDeadlineImpl{
firstErrorTime: time.Time{},
lastErrorTime: time.Time{},
maxRetryDuration: maxRetryDuration,
minResetPeriod: minResetPeriod,
isRetryable: isRetryable,
clock: clock,
}
}
func (r *retryWithDeadlineImpl) reset() {
r.firstErrorTime = time.Time{}
r.lastErrorTime = time.Time{}
}
func (r *retryWithDeadlineImpl) After(err error) {
if r.isRetryable(err) {
if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod {
r.reset()
}
if r.firstErrorTime.IsZero() {
r.firstErrorTime = r.clock.Now()
}
r.lastErrorTime = r.clock.Now()
}
}
func (r *retryWithDeadlineImpl) ShouldRetry() bool {
if r.maxRetryDuration <= time.Duration(0) {
return false
}
if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration {
return true
}
r.reset()
return false
}
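A minimal usage sketch (not part of this commit) of the new RetryWithDeadline helper; the durations and the isRetryable classifier below are illustrative only, the reflector wires in its own error checks:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/utils/clock"
)

func main() {
	// Treat every non-nil error as retryable for this sketch.
	isRetryable := func(err error) bool { return err != nil }

	// Retry for at most 2 minutes; reset the window if errors stop for 1 minute.
	retry := cache.NewRetryWithDeadline(2*time.Minute, time.Minute, isRetryable, clock.RealClock{})

	retry.After(errors.New("watch stream closed"))
	if retry.ShouldRetry() {
		fmt.Println("still within the retry deadline, retrying")
	}
}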

View File

@@ -199,8 +199,11 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error)
return c.cacheStorage.Index(indexName, obj)
}
func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
return c.cacheStorage.IndexKeys(indexName, indexKey)
// IndexKeys returns the storage keys of the stored objects whose set of
// indexed values for the named index includes the given indexed value.
// The returned keys are suitable to pass to GetByKey().
func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) {
return c.cacheStorage.IndexKeys(indexName, indexedValue)
}
// ListIndexFuncValues returns the list of generated values of an Index func
@@ -208,8 +211,10 @@ func (c *cache) ListIndexFuncValues(indexName string) []string {
return c.cacheStorage.ListIndexFuncValues(indexName)
}
func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
return c.cacheStorage.ByIndex(indexName, indexKey)
// ByIndex returns the stored objects whose set of indexed values
// for the named index includes the given indexed value.
func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
return c.cacheStorage.ByIndex(indexName, indexedValue)
}
func (c *cache) AddIndexers(newIndexers Indexers) error {
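Since the doc comments above describe IndexKeys and ByIndex in terms of indexed values, here is a short sketch (index name and objects are illustrative) of querying an Indexer both ways:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index pods by the node they are scheduled to; "byNode" is an arbitrary index name.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byNode": func(obj interface{}) ([]string, error) {
			return []string{obj.(*v1.Pod).Spec.NodeName}, nil
		},
	})

	_ = indexer.Add(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-a"},
	})

	objs, _ := indexer.ByIndex("byNode", "node-a")   // the stored objects themselves
	keys, _ := indexer.IndexKeys("byNode", "node-a") // their store keys, usable with GetByKey
	fmt.Println(len(objs), keys)
}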

View File

@@ -47,9 +47,9 @@ type ThreadSafeStore interface {
ListKeys() []string
Replace(map[string]interface{}, string)
Index(indexName string, obj interface{}) ([]interface{}, error)
IndexKeys(indexName, indexKey string) ([]string, error)
IndexKeys(indexName, indexedValue string) ([]string, error)
ListIndexFuncValues(name string) []string
ByIndex(indexName, indexKey string) ([]interface{}, error)
ByIndex(indexName, indexedValue string) ([]interface{}, error)
GetIndexers() Indexers
// AddIndexers adds more indexers to this store. If you call this after you already have data

View File

@@ -51,10 +51,10 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
// Prompt for user/pass and write a file if none exists.
if _, err := os.Stat(path); os.IsNotExist(err) {
authPtr, err := a.Prompt()
auth := *authPtr
if err != nil {
return nil, err
}
auth := *authPtr
data, err := json.Marshal(auth)
if err != nil {
return &auth, err

View File

@@ -160,8 +160,10 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
// if the ExplicitPath, return the unmerged explicit file
// Otherwise, return a merged config based on the Precedence slice
//
// if the ExplicitPath, return the unmerged explicit file
// Otherwise, return a merged config based on the Precedence slice
//
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.

View File

@@ -204,8 +204,19 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
if exists {
validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
// Default to empty users and clusters and let the validation function report an error.
authInfo := config.AuthInfos[context.AuthInfo]
if authInfo == nil {
authInfo = &clientcmdapi.AuthInfo{}
}
validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...)
cluster := config.Clusters[context.Cluster]
if cluster == nil {
cluster = &clientcmdapi.Cluster{}
}
validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...)
}
return newErrConfigurationInvalid(validationErrors)
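The nil guards above matter when a context references a user or cluster that is missing from the kubeconfig; a small sketch (hypothetical names) showing that ConfirmUsable now returns validation errors for such a config instead of dereferencing the nil map entries:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// A context that points at a user and a cluster that are not defined.
	cfg := clientcmdapi.Config{
		CurrentContext: "broken",
		Contexts: map[string]*clientcmdapi.Context{
			"broken": {AuthInfo: "missing-user", Cluster: "missing-cluster"},
		},
	}

	if err := clientcmd.ConfirmUsable(cfg, "broken"); err != nil {
		fmt.Println("validation errors:", err)
	}
}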

View File

@@ -307,7 +307,15 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value is used to stop recording
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() {
watcher := e.Watch()
watcher, err := e.Watch()
if err != nil {
klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
// TODO: Rewrite the function signature to return an error, for
// now just return a no-op function
return func() {
klog.Error("The event watcher failed to start")
}
}
go func() {
defer utilruntime.HandleCrash()
for {

View File

@@ -161,7 +161,7 @@ type LeaderElectionConfig struct {
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
// * OnChallenge()
// - OnChallenge()
type LeaderCallbacks struct {
// OnStartedLeading is called when a LeaderElector client starts leading
OnStartedLeading func(context.Context)

View File

@@ -92,7 +92,7 @@ type EventRecorder interface {
// Event constructs an event from the given information and puts it in the queue for sending.
// 'object' is the object this event is about. Event will make a reference-- or you may also
// pass a reference to the object directly.
// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
// 'eventtype' of this event, and can be one of Normal, Warning. New types could be added in future
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
// to automate handling of events, so imagine people writing switch statements to handle them.
@@ -298,7 +298,10 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value can be ignored or used to stop recording, if desired.
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
watcher := e.Watch()
watcher, err := e.Watch()
if err != nil {
klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
}
go func() {
defer utilruntime.HandleCrash()
for watchEvent := range watcher.ResultChan() {
@@ -346,7 +349,12 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
// when we go to shut down this broadcaster. Just drop events if we get overloaded,
// and log an error if that happens (we've configured the broadcaster to drop
// outgoing events anyway).
if sent := recorder.ActionOrDrop(watch.Added, event); !sent {
sent, err := recorder.ActionOrDrop(watch.Added, event)
if err != nil {
klog.Errorf("unable to record event: %v (will not retry!)", err)
return
}
if !sent {
klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
}
}

View File

@@ -235,10 +235,10 @@ type aggregateRecord struct {
// EventAggregate checks if a similar event has been seen according to the
// aggregation configuration (max events, max interval, etc) and returns:
//
// - The (potentially modified) event that should be created
// - The cache key for the event, for correlation purposes. This will be set to
// the full key for normal events, and to the result of
// EventAggregatorMessageFunc for aggregate events.
// - The (potentially modified) event that should be created
// - The cache key for the event, for correlation purposes. This will be set to
// the full key for normal events, and to the result of
// EventAggregatorMessageFunc for aggregate events.
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
now := metav1.NewTime(e.clock.Now())
var record aggregateRecord
@@ -427,14 +427,14 @@ type EventCorrelateResult struct {
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
// * Aggregation is performed if a similar event is recorded 10 times
// - Aggregation is performed if a similar event is recorded 10 times
// in a 10 minute rolling interval. A similar event is an event that varies only by
// the Event.Message field. Rather than recording the precise event, aggregation
// will create a new event whose message reports that it has combined events with
// the same reason.
// * Events are incrementally counted if the exact same event is encountered multiple
// - Events are incrementally counted if the exact same event is encountered multiple
// times.
// * A source may burst 25 events about an object, but has a refill rate budget
// - A source may burst 25 events about an object, but has a refill rate budget
// per object of 1 event every 5 minutes to control long-tail of spam.
func NewEventCorrelator(clock clock.PassiveClock) *EventCorrelator {
cacheSize := maxLruCacheEntries

View File

@@ -36,6 +36,11 @@ type tlsTransportCache struct {
transports map[tlsCacheKey]*http.Transport
}
// DialerStopCh is the stop channel that is passed down to the dynamic cert dialer.
// It's exposed as a variable for testing purposes, to avoid having to test for
// goroutine leaks.
var DialerStopCh = wait.NeverStop
const idleConnsPerHost = 25
var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
@@ -101,7 +106,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial)
tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate
dial = dynamicCertDialer.connDialer.DialContext
go dynamicCertDialer.Run(wait.NeverStop)
go dynamicCertDialer.Run(DialerStopCh)
}
proxy := http.ProxyFromEnvironment
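DialerStopCh exists so tests can terminate the cert-rotating dialer goroutine started above; a sketch of how a test might swap it for a closable channel (the body that exercises the transport is omitted):

package transport_test

import (
	"testing"

	"k8s.io/client-go/transport"
)

func TestCertDialerGoroutineStops(t *testing.T) {
	// Replace the default wait.NeverStop channel with one the test controls.
	stopCh := make(chan struct{})
	transport.DialerStopCh = stopCh
	defer close(stopCh) // stops dynamicCertDialer.Run when the test returns

	// ... build a transport.Config with rotating client certificates and
	// call transport.New(config) here ...
}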

View File

@@ -491,7 +491,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
DNSDone: func(info httptrace.DNSDoneInfo) {
reqInfo.muTrace.Lock()
defer reqInfo.muTrace.Unlock()
reqInfo.DNSLookup = time.Now().Sub(dnsStart)
reqInfo.DNSLookup = time.Since(dnsStart)
klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs)
},
// Dial
@@ -503,7 +503,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
ConnectDone: func(network, addr string, err error) {
reqInfo.muTrace.Lock()
defer reqInfo.muTrace.Unlock()
reqInfo.Dialing = time.Now().Sub(dialStart)
reqInfo.Dialing = time.Since(dialStart)
if err != nil {
klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err)
} else {
@@ -517,7 +517,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
reqInfo.muTrace.Lock()
defer reqInfo.muTrace.Unlock()
reqInfo.TLSHandshake = time.Now().Sub(tlsStart)
reqInfo.TLSHandshake = time.Since(tlsStart)
},
// Connection (it can be DNS + Dial or just the time to get one from the connection pool)
GetConn: func(hostPort string) {
@@ -526,7 +526,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
GotConn: func(info httptrace.GotConnInfo) {
reqInfo.muTrace.Lock()
defer reqInfo.muTrace.Unlock()
reqInfo.GetConnection = time.Now().Sub(getConn)
reqInfo.GetConnection = time.Since(getConn)
reqInfo.ConnectionReused = info.Reused
},
// Server Processing (time since we wrote the request until first byte is received)
@@ -538,7 +538,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
GotFirstResponseByte: func() {
reqInfo.muTrace.Lock()
defer reqInfo.muTrace.Unlock()
reqInfo.ServerProcessing = time.Now().Sub(serverStart)
reqInfo.ServerProcessing = time.Since(serverStart)
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

View File

@@ -1,579 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strings"
"k8s.io/client-go/third_party/forked/golang/template"
)
type JSONPath struct {
name string
parser *Parser
beginRange int
inRange int
endRange int
lastEndNode *Node
allowMissingKeys bool
outputJSON bool
}
// New creates a new JSONPath with the given name.
func New(name string) *JSONPath {
return &JSONPath{
name: name,
beginRange: 0,
inRange: 0,
endRange: 0,
}
}
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
// cannot be located, or simply an empty result. The receiver is returned for chaining.
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
j.allowMissingKeys = allow
return j
}
// Parse parses the given template and returns an error.
func (j *JSONPath) Parse(text string) error {
var err error
j.parser, err = Parse(j.name, text)
return err
}
// Execute bounds data into template and writes the result.
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
fullResults, err := j.FindResults(data)
if err != nil {
return err
}
for ix := range fullResults {
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
return err
}
}
return nil
}
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
if j.parser == nil {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
cur := []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(cur, node)
if err != nil {
return nil, err
}
// encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange--
j.lastEndNode = &nodes[i]
break
}
// encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange--
j.inRange++
if len(results) > 0 {
for _, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
} else {
// If the range has no results, we still need to process the nodes within the range
// so the position will advance to the end node
j.parser.Root.Nodes = nodes[i+1:]
_, err := j.FindResults(nil)
if err != nil {
return nil, err
}
}
j.inRange--
// Fast forward to resume processing after the most recent end node that was encountered
for k := i + 1; k < len(nodes); k++ {
if &nodes[k] == j.lastEndNode {
i = k
break
}
}
continue
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
func (j *JSONPath) EnableJSONOutput(v bool) {
j.outputJSON = v
}
// PrintResults writes the results into writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
if j.outputJSON {
// convert the []reflect.Value to something that json
// will be able to marshal
r := make([]interface{}, 0, len(results))
for i := range results {
r = append(r, results[i].Interface())
}
results = []reflect.Value{reflect.ValueOf(r)}
}
for i, r := range results {
var text []byte
var err error
outputJSON := true
kind := r.Kind()
if kind == reflect.Interface {
kind = r.Elem().Kind()
}
switch kind {
case reflect.Map:
case reflect.Array:
case reflect.Slice:
case reflect.Struct:
default:
outputJSON = false
}
switch {
case outputJSON || j.outputJSON:
if j.outputJSON {
text, err = json.MarshalIndent(r.Interface(), "", " ")
text = append(text, '\n')
} else {
text, err = json.Marshal(r.Interface())
}
default:
text, err = j.evalToText(r)
}
if err != nil {
return err
}
if i != len(results)-1 {
text = append(text, ' ')
}
if _, err = wr.Write(text); err != nil {
return err
}
}
return nil
}
// walk visits tree rooted at the given node in DFS order
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
switch node := node.(type) {
case *ListNode:
return j.evalList(value, node)
case *TextNode:
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
case *FieldNode:
return j.evalField(value, node)
case *ArrayNode:
return j.evalArray(value, node)
case *FilterNode:
return j.evalFilter(value, node)
case *IntNode:
return j.evalInt(value, node)
case *BoolNode:
return j.evalBool(value, node)
case *FloatNode:
return j.evalFloat(value, node)
case *WildcardNode:
return j.evalWildcard(value, node)
case *RecursiveNode:
return j.evalRecursive(value, node)
case *UnionNode:
return j.evalUnion(value, node)
case *IdentifierNode:
return j.evalIdentifier(value, node)
default:
return value, fmt.Errorf("unexpected Node %v", node)
}
}
// evalInt evaluates IntNode
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalFloat evaluates FloatNode
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalBool evaluates BoolNode
func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalList evaluates ListNode
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
var err error
curValue := value
for _, node := range node.Nodes {
curValue, err = j.walk(curValue, node)
if err != nil {
return curValue, err
}
}
return curValue, nil
}
// evalIdentifier evaluates IdentifierNode
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
results := []reflect.Value{}
switch node.Name {
case "range":
j.beginRange++
results = input
case "end":
if j.inRange > 0 {
j.endRange++
} else {
return results, fmt.Errorf("not in range, nothing to end")
}
default:
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
}
return results, nil
}
// evalArray evaluates ArrayNode
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value.Type())
}
params := node.Params
if !params[0].Known {
params[0].Value = 0
}
if params[0].Value < 0 {
params[0].Value += value.Len()
}
if !params[1].Known {
params[1].Value = value.Len()
}
if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) {
params[1].Value += value.Len()
}
sliceLength := value.Len()
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
if params[0].Value >= sliceLength || params[0].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
}
if params[1].Value > sliceLength || params[1].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
}
if params[0].Value > params[1].Value {
return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value)
}
} else {
return result, nil
}
value = value.Slice(params[0].Value, params[1].Value)
step := 1
if params[2].Known {
if params[2].Value <= 0 {
return input, fmt.Errorf("step must be > 0")
}
step = params[2].Value
}
for i := 0; i < value.Len(); i += step {
result = append(result, value.Index(i))
}
}
return result, nil
}
// evalUnion evaluates UnionNode
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, listNode := range node.Nodes {
temp, err := j.evalList(input, listNode)
if err != nil {
return input, err
}
result = append(result, temp...)
}
return result, nil
}
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
t := value.Type()
var inlineValue *reflect.Value
for ix := 0; ix < t.NumField(); ix++ {
f := t.Field(ix)
jsonTag := f.Tag.Get("json")
parts := strings.Split(jsonTag, ",")
if len(parts) == 0 {
continue
}
if parts[0] == node.Value {
return value.Field(ix), nil
}
if len(parts[0]) == 0 {
val := value.Field(ix)
inlineValue = &val
}
}
if inlineValue != nil {
if inlineValue.Kind() == reflect.Struct {
// handle 'inline'
match, err := j.findFieldInValue(inlineValue, node)
if err != nil {
return reflect.Value{}, err
}
if match.IsValid() {
return match, nil
}
}
}
return value.FieldByName(node.Value), nil
}
// evalField evaluates field of struct or key of map.
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
results := []reflect.Value{}
// If there's no input, there's no output
if len(input) == 0 {
return results, nil
}
for _, value := range input {
var result reflect.Value
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() == reflect.Struct {
var err error
if result, err = j.findFieldInValue(&value, node); err != nil {
return nil, err
}
} else if value.Kind() == reflect.Map {
mapKeyType := value.Type().Key()
nodeValue := reflect.ValueOf(node.Value)
// node value type must be convertible to map key type
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
}
result = value.MapIndex(nodeValue.Convert(mapKeyType))
}
if result.IsValid() {
results = append(results, result)
}
}
if len(results) == 0 {
if j.allowMissingKeys {
return results, nil
}
return results, fmt.Errorf("%s is not found", node.Value)
}
return results, nil
}
// evalWildcard extracts all contents of the given value
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalRecursive visits the given value recursively and pushes all of them to result
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
results := []reflect.Value{}
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
if len(results) != 0 {
result = append(result, value)
output, err := j.evalRecursive(results, node)
if err != nil {
return result, err
}
result = append(result, output...)
}
}
return result, nil
}
// evalFilter filters array according to FilterNode
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, _ = template.Indirect(value)
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
}
for i := 0; i < value.Len(); i++ {
temp := []reflect.Value{value.Index(i)}
lefts, err := j.evalList(temp, node.Left)
//case exists
if node.Operator == "exists" {
if len(lefts) > 0 {
results = append(results, value.Index(i))
}
continue
}
if err != nil {
return input, err
}
var left, right interface{}
switch {
case len(lefts) == 0:
continue
case len(lefts) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
left = lefts[0].Interface()
rights, err := j.evalList(temp, node.Right)
if err != nil {
return input, err
}
switch {
case len(rights) == 0:
continue
case len(rights) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
right = rights[0].Interface()
pass := false
switch node.Operator {
case "<":
pass, err = template.Less(left, right)
case ">":
pass, err = template.Greater(left, right)
case "==":
pass, err = template.Equal(left, right)
case "!=":
pass, err = template.NotEqual(left, right)
case "<=":
pass, err = template.LessEqual(left, right)
case ">=":
pass, err = template.GreaterEqual(left, right)
default:
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
}
if err != nil {
return results, err
}
if pass {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalToText translates reflect value to corresponding text
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
iface, ok := template.PrintableValue(v)
if !ok {
return nil, fmt.Errorf("can't print type %s", v.Type())
}
var buffer bytes.Buffer
fmt.Fprint(&buffer, iface)
return buffer.Bytes(), nil
}

View File

@@ -1,256 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import "fmt"
// NodeType identifies the type of a parse tree node.
type NodeType int
// Type returns itself and provides an easy default implementation
func (t NodeType) Type() NodeType {
return t
}
func (t NodeType) String() string {
return NodeTypeName[t]
}
const (
NodeText NodeType = iota
NodeArray
NodeList
NodeField
NodeIdentifier
NodeFilter
NodeInt
NodeFloat
NodeWildcard
NodeRecursive
NodeUnion
NodeBool
)
var NodeTypeName = map[NodeType]string{
NodeText: "NodeText",
NodeArray: "NodeArray",
NodeList: "NodeList",
NodeField: "NodeField",
NodeIdentifier: "NodeIdentifier",
NodeFilter: "NodeFilter",
NodeInt: "NodeInt",
NodeFloat: "NodeFloat",
NodeWildcard: "NodeWildcard",
NodeRecursive: "NodeRecursive",
NodeUnion: "NodeUnion",
NodeBool: "NodeBool",
}
type Node interface {
Type() NodeType
String() string
}
// ListNode holds a sequence of nodes.
type ListNode struct {
NodeType
Nodes []Node // The element nodes in lexical order.
}
func newList() *ListNode {
return &ListNode{NodeType: NodeList}
}
func (l *ListNode) append(n Node) {
l.Nodes = append(l.Nodes, n)
}
func (l *ListNode) String() string {
return l.Type().String()
}
// TextNode holds plain text.
type TextNode struct {
NodeType
Text string // The text; may span newlines.
}
func newText(text string) *TextNode {
return &TextNode{NodeType: NodeText, Text: text}
}
func (t *TextNode) String() string {
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
}
// FieldNode holds field of struct
type FieldNode struct {
NodeType
Value string
}
func newField(value string) *FieldNode {
return &FieldNode{NodeType: NodeField, Value: value}
}
func (f *FieldNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
}
// IdentifierNode holds an identifier
type IdentifierNode struct {
NodeType
Name string
}
func newIdentifier(value string) *IdentifierNode {
return &IdentifierNode{
NodeType: NodeIdentifier,
Name: value,
}
}
func (f *IdentifierNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
}
// ParamsEntry holds param information for ArrayNode
type ParamsEntry struct {
Value int
Known bool // whether the value is known when parse it
Derived bool
}
// ArrayNode holds start, end, step information for array index selection
type ArrayNode struct {
NodeType
Params [3]ParamsEntry // start, end, step
}
func newArray(params [3]ParamsEntry) *ArrayNode {
return &ArrayNode{
NodeType: NodeArray,
Params: params,
}
}
func (a *ArrayNode) String() string {
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
}
// FilterNode holds operand and operator information for filter
type FilterNode struct {
NodeType
Left *ListNode
Right *ListNode
Operator string
}
func newFilter(left, right *ListNode, operator string) *FilterNode {
return &FilterNode{
NodeType: NodeFilter,
Left: left,
Right: right,
Operator: operator,
}
}
func (f *FilterNode) String() string {
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
}
// IntNode holds integer value
type IntNode struct {
NodeType
Value int
}
func newInt(num int) *IntNode {
return &IntNode{NodeType: NodeInt, Value: num}
}
func (i *IntNode) String() string {
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
}
// FloatNode holds float value
type FloatNode struct {
NodeType
Value float64
}
func newFloat(num float64) *FloatNode {
return &FloatNode{NodeType: NodeFloat, Value: num}
}
func (i *FloatNode) String() string {
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
}
// WildcardNode means a wildcard
type WildcardNode struct {
NodeType
}
func newWildcard() *WildcardNode {
return &WildcardNode{NodeType: NodeWildcard}
}
func (i *WildcardNode) String() string {
return i.Type().String()
}
// RecursiveNode means a recursive descent operator
type RecursiveNode struct {
NodeType
}
func newRecursive() *RecursiveNode {
return &RecursiveNode{NodeType: NodeRecursive}
}
func (r *RecursiveNode) String() string {
return r.Type().String()
}
// UnionNode is union of ListNode
type UnionNode struct {
NodeType
Nodes []*ListNode
}
func newUnion(nodes []*ListNode) *UnionNode {
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
}
func (u *UnionNode) String() string {
return u.Type().String()
}
// BoolNode holds bool value
type BoolNode struct {
NodeType
Value bool
}
func newBool(value bool) *BoolNode {
return &BoolNode{NodeType: NodeBool, Value: value}
}
func (b *BoolNode) String() string {
return fmt.Sprintf("%s: %t", b.Type(), b.Value)
}

View File

@@ -1,527 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
const eof = -1
const (
leftDelim = "{"
rightDelim = "}"
)
type Parser struct {
Name string
Root *ListNode
input string
pos int
start int
width int
}
var (
ErrSyntax = errors.New("invalid syntax")
dictKeyRex = regexp.MustCompile(`^'([^']*)'$`)
sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`)
)
// Parse parsed the given text and return a node Parser.
// If an error is encountered, parsing stops and an empty
// Parser is returned with the error
func Parse(name, text string) (*Parser, error) {
p := NewParser(name)
err := p.Parse(text)
if err != nil {
p = nil
}
return p, err
}
func NewParser(name string) *Parser {
return &Parser{
Name: name,
}
}
// parseAction parsed the expression inside delimiter
func parseAction(name, text string) (*Parser, error) {
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
// when error happens, p will be nil, so we need to return here
if err != nil {
return p, err
}
p.Root = p.Root.Nodes[0].(*ListNode)
return p, nil
}
func (p *Parser) Parse(text string) error {
p.input = text
p.Root = newList()
p.pos = 0
return p.parseText(p.Root)
}
// consumeText return the parsed text since last cosumeText
func (p *Parser) consumeText() string {
value := p.input[p.start:p.pos]
p.start = p.pos
return value
}
// next returns the next rune in the input.
func (p *Parser) next() rune {
if p.pos >= len(p.input) {
p.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
p.width = w
p.pos += p.width
return r
}
// peek returns but does not consume the next rune in the input.
func (p *Parser) peek() rune {
r := p.next()
p.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (p *Parser) backup() {
p.pos -= p.width
}
func (p *Parser) parseText(cur *ListNode) error {
for {
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return p.parseLeftDelim(cur)
}
if p.next() == eof {
break
}
}
// Correctly reached EOF.
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return nil
}
// parseLeftDelim scans the left delimiter, which is known to be present.
func (p *Parser) parseLeftDelim(cur *ListNode) error {
p.pos += len(leftDelim)
p.consumeText()
newNode := newList()
cur.append(newNode)
cur = newNode
return p.parseInsideAction(cur)
}
func (p *Parser) parseInsideAction(cur *ListNode) error {
prefixMap := map[string]func(*ListNode) error{
rightDelim: p.parseRightDelim,
"[?(": p.parseFilter,
"..": p.parseRecursive,
}
for prefix, parseFunc := range prefixMap {
if strings.HasPrefix(p.input[p.pos:], prefix) {
return parseFunc(cur)
}
}
switch r := p.next(); {
case r == eof || isEndOfLine(r):
return fmt.Errorf("unclosed action")
case r == ' ':
p.consumeText()
case r == '@' || r == '$': //the current object, just pass it
p.consumeText()
case r == '[':
return p.parseArray(cur)
case r == '"' || r == '\'':
return p.parseQuote(cur, r)
case r == '.':
return p.parseField(cur)
case r == '+' || r == '-' || unicode.IsDigit(r):
p.backup()
return p.parseNumber(cur)
case isAlphaNumeric(r):
p.backup()
return p.parseIdentifier(cur)
default:
return fmt.Errorf("unrecognized character in action: %#U", r)
}
return p.parseInsideAction(cur)
}
// parseRightDelim scans the right delimiter, which is known to be present.
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
return p.parseText(p.Root)
}
// parseIdentifier scans build-in keywords, like "range" "end"
func (p *Parser) parseIdentifier(cur *ListNode) error {
var r rune
for {
r = p.next()
if isTerminator(r) {
p.backup()
break
}
}
value := p.consumeText()
if isBool(value) {
v, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
}
cur.append(newBool(v))
} else {
cur.append(newIdentifier(value))
}
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive descent operator ..
func (p *Parser) parseRecursive(cur *ListNode) error {
if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
return fmt.Errorf("invalid multiple recursive descent")
}
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
if r := p.peek(); isAlphaNumeric(r) {
return p.parseField(cur)
}
return p.parseInsideAction(cur)
}
// parseNumber scans number
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
p.next()
}
for {
r = p.next()
if r != '.' && !unicode.IsDigit(r) {
p.backup()
break
}
}
value := p.consumeText()
i, err := strconv.Atoi(value)
if err == nil {
cur.append(newInt(i))
return p.parseInsideAction(cur)
}
d, err := strconv.ParseFloat(value, 64)
if err == nil {
cur.append(newFloat(d))
return p.parseInsideAction(cur)
}
return fmt.Errorf("cannot parse number %s", value)
}
// parseArray scans array index selection
func (p *Parser) parseArray(cur *ListNode) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated array")
case ']':
break Loop
}
}
text := p.consumeText()
text = text[1 : len(text)-1]
if text == "*" {
text = ":"
}
//union operator
strs := strings.Split(text, ",")
if len(strs) > 1 {
union := []*ListNode{}
for _, str := range strs {
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
if err != nil {
return err
}
union = append(union, parser.Root)
}
cur.append(newUnion(union))
return p.parseInsideAction(cur)
}
// dict key
value := dictKeyRex.FindStringSubmatch(text)
if value != nil {
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
if err != nil {
return err
}
for _, node := range parser.Root.Nodes {
cur.append(node)
}
return p.parseInsideAction(cur)
}
//slice operator
value = sliceOperatorRex.FindStringSubmatch(text)
if value == nil {
return fmt.Errorf("invalid array index %s", text)
}
value = value[1:]
params := [3]ParamsEntry{}
for i := 0; i < 3; i++ {
if value[i] != "" {
if i > 0 {
value[i] = value[i][1:]
}
if i > 0 && value[i] == "" {
params[i].Known = false
} else {
var err error
params[i].Known = true
params[i].Value, err = strconv.Atoi(value[i])
if err != nil {
return fmt.Errorf("array index %s is not a number", value[i])
}
}
} else {
if i == 1 {
params[i].Known = true
params[i].Value = params[0].Value + 1
params[i].Derived = true
} else {
params[i].Known = false
params[i].Value = 0
}
}
}
cur.append(newArray(params))
return p.parseInsideAction(cur)
}
// parseFilter scans filter inside array selection
func (p *Parser) parseFilter(cur *ListNode) error {
p.pos += len("[?(")
p.consumeText()
begin := false
end := false
var pair rune
Loop:
for {
r := p.next()
switch r {
case eof, '\n':
return fmt.Errorf("unterminated filter")
case '"', '\'':
if begin == false {
//save the paired rune
begin = true
pair = r
continue
}
//only add when met paired rune
if p.input[p.pos-2] != '\\' && r == pair {
end = true
}
case ')':
//in rightParser below quotes only appear zero or once
//and must be paired at the beginning and end
if begin == end {
break Loop
}
}
}
if p.next() != ']' {
return fmt.Errorf("unclosed array expect ]")
}
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
text := p.consumeText()
text = text[:len(text)-2]
value := reg.FindStringSubmatch(text)
if value == nil {
parser, err := parseAction("text", text)
if err != nil {
return err
}
cur.append(newFilter(parser.Root, newList(), "exists"))
} else {
leftParser, err := parseAction("left", value[1])
if err != nil {
return err
}
rightParser, err := parseAction("right", value[3])
if err != nil {
return err
}
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
}
return p.parseInsideAction(cur)
}
// parseQuote unquotes string inside double or single quote
func (p *Parser) parseQuote(cur *ListNode, end rune) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated quoted string")
case end:
//if it's not escape break the Loop
if p.input[p.pos-2] != '\\' {
break Loop
}
}
}
value := p.consumeText()
s, err := UnquoteExtend(value)
if err != nil {
return fmt.Errorf("unquote string %s error %v", value, err)
}
cur.append(newText(s))
return p.parseInsideAction(cur)
}
// parseField scans a field until a terminator
func (p *Parser) parseField(cur *ListNode) error {
p.consumeText()
for p.advance() {
}
value := p.consumeText()
if value == "*" {
cur.append(newWildcard())
} else {
cur.append(newField(strings.Replace(value, "\\", "", -1)))
}
return p.parseInsideAction(cur)
}
// advance scans until next non-escaped terminator
func (p *Parser) advance() bool {
r := p.next()
if r == '\\' {
p.next()
} else if isTerminator(r) {
p.backup()
return false
}
return true
}
// isTerminator reports whether the input is at valid termination character to appear after an identifier.
func isTerminator(r rune) bool {
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isBool reports whether s is a boolean value.
func isBool(s string) bool {
return s == "true" || s == "false"
}
//UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string
func UnquoteExtend(s string) (string, error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' && quote != '\'' {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
return s, nil
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
}
return string(buf), nil
}
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}

View File

@@ -16,11 +16,11 @@ limitations under the License.
// Package workqueue provides a simple queue that supports the following
// features:
// * Fair: items processed in the order in which they are added.
// * Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// * Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// * Shutdown notifications.
// - Fair: items processed in the order in which they are added.
// - Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// - Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// - Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"

View File

@@ -50,6 +50,13 @@ func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitin
}
}
func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: di,
rateLimiter: rateLimiter,
}
}
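A brief usage sketch of the new constructor, wiring a caller-supplied delaying queue into a rate-limited queue (the queue item and the limiter choice are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Reuse an existing DelayingInterface rather than letting the queue create its own.
	delaying := workqueue.NewDelayingQueue()
	q := workqueue.NewRateLimitingQueueWithDelayingInterface(delaying, workqueue.DefaultControllerRateLimiter())

	q.Add("example-key")
	if item, shutdown := q.Get(); !shutdown {
		fmt.Println("processing", item)
		q.Done(item)
	}
	q.ShutDown()
}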
// rateLimitingType wraps an Interface and provides rateLimited re-enquing
type rateLimitingType struct {
DelayingInterface