mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-28 14:41:10 +01:00

remove pod security policy; additional policy/v1beta1 cleanup; use informers for descheduler unit tests

update go to 1.19 and helm kubernetes cluster to 1.25
bump -rc.0 to 1.25 GA
bump k8s utils library
bump golang-ci
use go 1.19 for helm github action
upgrade kubectl from 0.20 to 0.25

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Amir Alavi
2022-09-01 21:26:19 -04:00
parent c9b0fbe467
commit e8fae9a3b7
171 changed files with 23514 additions and 692 deletions

View File

@@ -5133,6 +5133,7 @@ message ServiceSpec {
// service or not. If this field is specified when creating a Service
// which does not need it, creation will fail. This field will be wiped
// when updating a Service to no longer need it (e.g. changing type).
// This field cannot be updated once set.
// +optional
optional int32 healthCheckNodePort = 12;

1
vendor/k8s.io/api/core/v1/types.go generated vendored
View File

@@ -4502,6 +4502,7 @@ type ServiceSpec struct {
// service or not. If this field is specified when creating a Service
// which does not need it, creation will fail. This field will be wiped
// when updating a Service to no longer need it (e.g. changing type).
// This field cannot be updated once set.
// +optional
HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`

View File

@@ -2277,7 +2277,7 @@ var map_ServiceSpec = map[string]string{
"loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/",
"externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".",
"externalTrafficPolicy": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.",
"healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).",
"healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.",
"publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.",
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
"ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.",

View File

@@ -1,43 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
import (
"fmt"
"net/http"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/request"
)
// CompressionDisabledFunc checks if a given request should disable compression.
type CompressionDisabledFunc func(*http.Request) (bool, error)
// WithCompressionDisabled stores result of CompressionDisabledFunc in context.
func WithCompressionDisabled(handler http.Handler, predicate CompressionDisabledFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
decision, err := predicate(req)
if err != nil {
responsewriters.InternalError(w, req, fmt.Errorf("failed to determine if request should disable compression: %v", err))
return
}
req = req.WithContext(request.WithCompressionDisabled(ctx, decision))
handler.ServeHTTP(w, req)
})
}
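
This filter file is deleted by the commit, together with its call sites further down. For reference, a standalone sketch of how the `WithCompressionDisabled` function shown above could be wired around a handler; the loopback predicate and the inner handler are hypothetical, and the import only resolves against a tree that still vendors the filter.

```go
package main

import (
	"net"
	"net/http"

	genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
)

func main() {
	// Hypothetical predicate: disable compression for loopback clients.
	disableForLoopback := func(req *http.Request) (bool, error) {
		host, _, err := net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			return false, err
		}
		return net.ParseIP(host).IsLoopback(), nil
	}

	var inner http.Handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// The filter stores the decision in the request context, where the
	// serialization code used to read it back via CompressionDisabledFrom.
	handler := genericapifilters.WithCompressionDisabled(inner, disableForLoopback)
	_ = http.ListenAndServe("127.0.0.1:8080", handler)
}
```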

View File

@@ -87,21 +87,19 @@ func StreamObject(statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSe
// The context is optional and can be nil. This method will perform optional content compression if requested by
// a client and the feature gate for APIResponseCompression is enabled.
func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) {
disableCompression := request.CompressionDisabledFrom(req.Context())
trace := utiltrace.New("SerializeObject",
utiltrace.Field{"audit-id", request.GetAuditIDTruncated(req.Context())},
utiltrace.Field{"method", req.Method},
utiltrace.Field{"url", req.URL.Path},
utiltrace.Field{"protocol", req.Proto},
utiltrace.Field{"mediaType", mediaType},
utiltrace.Field{"encoder", encoder.Identifier()},
utiltrace.Field{"disableCompression", disableCompression})
utiltrace.Field{"encoder", encoder.Identifier()})
defer trace.LogIfLong(5 * time.Second)
w := &deferredResponseWriter{
mediaType: mediaType,
statusCode: statusCode,
contentEncoding: negotiateContentEncoding(req, disableCompression),
contentEncoding: negotiateContentEncoding(req),
hw: hw,
trace: trace,
}
@@ -157,12 +155,12 @@ const (
// negotiateContentEncoding returns a supported client-requested content encoding for the
// provided request. It will return the empty string if no supported content encoding was
// found or if response compression is disabled.
func negotiateContentEncoding(req *http.Request, disableCompression bool) string {
func negotiateContentEncoding(req *http.Request) string {
encoding := req.Header.Get("Accept-Encoding")
if len(encoding) == 0 {
return ""
}
if !utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression) || disableCompression {
if !utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression) {
return ""
}
for len(encoding) > 0 {
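
The hunk above is truncated by the diff view. To illustrate the general idea it implements, compressing a response only when the client's Accept-Encoding asks for gzip, here is a small standard-library sketch; it is not the apiserver's implementation, which additionally consults the APIResponseCompression feature gate as shown above.

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// gzipResponseWriter forwards writes through a gzip.Writer.
type gzipResponseWriter struct {
	http.ResponseWriter
	gz *gzip.Writer
}

func (w *gzipResponseWriter) Write(p []byte) (int, error) { return w.gz.Write(p) }

// withGzip compresses the response only when the client asked for gzip.
func withGzip(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if !strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
			next.ServeHTTP(w, req)
			return
		}
		w.Header().Set("Content-Encoding", "gzip")
		gz := gzip.NewWriter(w)
		defer gz.Close()
		next.ServeHTTP(&gzipResponseWriter{ResponseWriter: w, gz: gz}, req)
	})
}

func main() {
	h := withGzip(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = io.WriteString(w, `{"kind":"PodList"}`)
	}))

	req := httptest.NewRequest("GET", "/api/v1/pods", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Header().Get("Content-Encoding")) // gzip
}
```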

View File

@@ -1,37 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package request
import (
"context"
)
type disableCompressionIDKeyType int
const disableCompressionIDKey disableCompressionIDKeyType = iota
// WithCompressionDisabled stores bool in context.
func WithCompressionDisabled(parent context.Context, disableCompression bool) context.Context {
return WithValue(parent, disableCompressionIDKey, disableCompression)
}
// CompressionDisabledFrom retrieves bool from context.
// Defaults to false if not set.
func CompressionDisabledFrom(ctx context.Context) bool {
decision, _ := ctx.Value(disableCompressionIDKey).(bool)
return decision
}
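
Both context helpers are deleted by this commit. The same context-key pattern, reduced to a self-contained standard-library sketch (names here are illustrative, not the removed API):

```go
package main

import (
	"context"
	"fmt"
)

// An unexported key type prevents collisions with other context values.
type disableCompressionKey struct{}

func withCompressionDisabled(parent context.Context, disabled bool) context.Context {
	return context.WithValue(parent, disableCompressionKey{}, disabled)
}

// compressionDisabledFrom defaults to false when the value was never stored.
func compressionDisabledFrom(ctx context.Context) bool {
	disabled, _ := ctx.Value(disableCompressionKey{}).(bool)
	return disabled
}

func main() {
	ctx := withCompressionDisabled(context.Background(), true)
	fmt.Println(compressionDisabledFrom(ctx))                  // true
	fmt.Println(compressionDisabledFrom(context.Background())) // false (default)
}
```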

View File

@@ -257,9 +257,6 @@ type Config struct {
// StorageVersionManager holds the storage versions of the API resources installed by this server.
StorageVersionManager storageversion.Manager
// CompressionDisabledFunc returns whether compression should be disabled for a given request.
CompressionDisabledFunc genericapifilters.CompressionDisabledFunc
}
type RecommendedConfig struct {
@@ -859,9 +856,6 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
if c.ShutdownSendRetryAfter {
handler = genericfilters.WithRetryAfter(handler, c.lifecycleSignals.NotAcceptingNewRequest.Signaled())
}
if c.CompressionDisabledFunc != nil {
handler = genericapifilters.WithCompressionDisabled(handler, c.CompressionDisabledFunc)
}
handler = genericfilters.WithHTTPLogging(handler)
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
handler = genericapifilters.WithTracing(handler, c.TracerProvider)

View File

@@ -54,10 +54,7 @@ type ResourceExpirationEvaluator interface {
}
func NewResourceExpirationEvaluator(currentVersion apimachineryversion.Info) (ResourceExpirationEvaluator, error) {
ret := &resourceExpirationEvaluator{
// TODO https://github.com/kubernetes/kubernetes/issues/109799 set this back to false after beta is tagged.
strictRemovedHandlingInAlpha: true,
}
ret := &resourceExpirationEvaluator{}
if len(currentVersion.Major) > 0 {
currentMajor64, err := strconv.ParseInt(currentVersion.Major, 10, 32)
if err != nil {

View File

@@ -26,9 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/util/disablecompression"
utilfeature "k8s.io/apiserver/pkg/util/feature"
netutils "k8s.io/utils/net"
"github.com/spf13/pflag"
)
@@ -65,27 +63,21 @@ type ServerRunOptions struct {
// If enabled, after ShutdownDelayDuration elapses, any incoming request is
// rejected with a 429 status code and a 'Retry-After' response.
ShutdownSendRetryAfter bool
// DisableCompressionForClientIPs is a comma separated list of CIDR IP ranges
// (parsable by net.ParseCIDR, as defined in RFC 4632 and RFC 4291) for which
// traffic compression should be disabled.
DisableCompressionForClientIPs []string
}
func NewServerRunOptions() *ServerRunOptions {
defaults := server.NewConfig(serializer.CodecFactory{})
return &ServerRunOptions{
MaxRequestsInFlight: defaults.MaxRequestsInFlight,
MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight,
RequestTimeout: defaults.RequestTimeout,
LivezGracePeriod: defaults.LivezGracePeriod,
MinRequestTimeout: defaults.MinRequestTimeout,
ShutdownDelayDuration: defaults.ShutdownDelayDuration,
JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes,
MaxRequestBodyBytes: defaults.MaxRequestBodyBytes,
EnablePriorityAndFairness: true,
ShutdownSendRetryAfter: false,
DisableCompressionForClientIPs: nil,
MaxRequestsInFlight: defaults.MaxRequestsInFlight,
MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight,
RequestTimeout: defaults.RequestTimeout,
LivezGracePeriod: defaults.LivezGracePeriod,
MinRequestTimeout: defaults.MinRequestTimeout,
ShutdownDelayDuration: defaults.ShutdownDelayDuration,
JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes,
MaxRequestBodyBytes: defaults.MaxRequestBodyBytes,
EnablePriorityAndFairness: true,
ShutdownSendRetryAfter: false,
}
}
@@ -105,13 +97,6 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error {
c.MaxRequestBodyBytes = s.MaxRequestBodyBytes
c.PublicAddress = s.AdvertiseAddress
c.ShutdownSendRetryAfter = s.ShutdownSendRetryAfter
if len(s.DisableCompressionForClientIPs) != 0 {
pred, err := disablecompression.NewClientIPPredicate(s.DisableCompressionForClientIPs)
if err != nil {
return err
}
c.CompressionDisabledFunc = pred.Predicate
}
return nil
}
@@ -176,10 +161,6 @@ func (s *ServerRunOptions) Validate() []error {
if err := validateHSTSDirectives(s.HSTSDirectives); err != nil {
errors = append(errors, err)
}
if _, err := netutils.ParseCIDRs(s.DisableCompressionForClientIPs); err != nil {
errors = append(errors, err)
}
return errors
}
@@ -275,8 +256,5 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
"during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, "+
"in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle.")
fs.StringSliceVar(&s.DisableCompressionForClientIPs, "disable-compression-for-client-ips", s.DisableCompressionForClientIPs, ""+
"A comma separated list of client IP ranges in CIDR notation like \"192.0.2.0/24\" or \"2001:db8::/32\", as defined in RFC 4632 and RFC 4291, for which traffic compression will be disabled.")
utilfeature.DefaultMutableFeatureGate.AddFlag(fs)
}

View File

@@ -1,57 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disablecompression
import (
"fmt"
"net"
"net/http"
utilnet "k8s.io/apimachinery/pkg/util/net"
netutils "k8s.io/utils/net"
)
// ClientIPPredicate.Predicate implements CompressionDisabledFunc interface that decides
// based on client IP.
type ClientIPPredicate struct {
cidrs []*net.IPNet
}
// NewClientIPPredicate creates a new ClientIPPredicate instance.
func NewClientIPPredicate(cidrStrings []string) (*ClientIPPredicate, error) {
cidrs, err := netutils.ParseCIDRs(cidrStrings)
if err != nil {
return nil, fmt.Errorf("failed to parse cidrs: %v", err)
}
return &ClientIPPredicate{cidrs: cidrs}, nil
}
// Predicate checks if ClientIP matches any cidr.
func (c *ClientIPPredicate) Predicate(req *http.Request) (bool, error) {
ip := utilnet.GetClientIP(req)
if ip == nil {
return false, fmt.Errorf("unable to determine source IP for %v", req)
}
for _, cidr := range c.cidrs {
if cidr.Contains(ip) {
return true, nil
}
}
return false, nil
}
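
The predicate above is removed along with the `--disable-compression-for-client-ips` flag. A minimal usage sketch against a pre-removal tree; the CIDRs and request values are made up for illustration.

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/util/disablecompression"
)

func main() {
	// Build the predicate from CIDR strings (the flag accepted the same form).
	pred, err := disablecompression.NewClientIPPredicate([]string{"192.0.2.0/24", "2001:db8::/32"})
	if err != nil {
		panic(err)
	}

	req, _ := http.NewRequest("GET", "https://example.invalid/healthz", nil)
	req.RemoteAddr = "192.0.2.10:52114" // GetClientIP falls back to RemoteAddr

	disable, err := pred.Predicate(req)
	fmt.Println(disable, err) // true <nil>
}
```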

View File

@@ -0,0 +1,56 @@
# Azure Active Directory plugin for client authentication
This plugin provides an integration with the Azure Active Directory device flow. If no tokens are present in the kubectl configuration, it will prompt for a device code which can be used to log in via a browser. After login it will automatically fetch the tokens and store them in the kubectl configuration. In addition, it will refresh and update the tokens in the configuration when they expire.
## Usage
1. Create an Azure Active Directory *Web App / API* application for `apiserver` following these [instructions](https://docs.microsoft.com/en-us/azure/active-directory/active-directory-app-registration). The callback URL does not matter (just cannot be empty).
2. Create a second Azure Active Directory native application for `kubectl`. The callback URL does not matter (just cannot be empty).
3. On `kubectl` application's configuration page in Azure portal grant permissions to `apiserver` application by clicking on *Required Permissions*, click the *Add* button and search for the apiserver application created in step 1. Select "Access apiserver" under the *DELEGATED PERMISSIONS*. Once added click the *Grant Permissions* button to apply the changes.
4. Configure the `apiserver` to use Azure Active Directory as an OIDC provider with the following options:
```
--oidc-client-id="spn:APISERVER_APPLICATION_ID" \
--oidc-issuer-url="https://sts.windows.net/TENANT_ID/"
--oidc-username-claim="sub"
```
* Replace `APISERVER_APPLICATION_ID` with the application ID of the `apiserver` application
* Replace `TENANT_ID` with your tenant ID.
  * For a list of alternative username claims that are supported by the OIDC issuer check the JSON response at `https://sts.windows.net/TENANT_ID/.well-known/openid-configuration`.
5. Configure `kubectl` to use the `azure` authentication provider
```
kubectl config set-credentials "USER_NAME" --auth-provider=azure \
--auth-provider-arg=environment=AzurePublicCloud \
--auth-provider-arg=client-id=APPLICATION_ID \
--auth-provider-arg=tenant-id=TENANT_ID \
--auth-provider-arg=apiserver-id=APISERVER_APPLICATION_ID
```
* Supported environments: `AzurePublicCloud`, `AzureUSGovernmentCloud`, `AzureChinaCloud`, `AzureGermanCloud`
* Replace `USER_NAME` and `TENANT_ID` with your user name and tenant ID
* Replace `APPLICATION_ID` with the application ID of your `kubectl` application
* Replace `APISERVER_APPLICATION_ID` with the application ID of your `apiserver` application
* Be sure to also (create and) select a context that uses the above user
6. (Optional) The AAD token's `aud` claim carries an `spn:` prefix. To omit it, add the following auth configuration:
```
--auth-provider-arg=config-mode="1"
```
7. The access token is acquired when the first `kubectl` command is executed
```
kubectl get pods
To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code DEC7D48GA to authenticate.
```
* After signing in via a web browser, the token is stored in the configuration and will be reused when executing further commands.
* The resulting username in Kubernetes depends on your [configuration of the `--oidc-username-claim` and `--oidc-username-prefix` flags on the API server](https://kubernetes.io/docs/admin/authentication/#configuring-the-api-server). If you are using any authorization method you need to give permissions to that user, e.g. by binding the user to a role in the case of RBAC.

View File

@@ -0,0 +1,477 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"sync"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/net"
restclient "k8s.io/client-go/rest"
)
type configMode int
const (
azureTokenKey = "azureTokenKey"
tokenType = "Bearer"
authHeader = "Authorization"
cfgClientID = "client-id"
cfgTenantID = "tenant-id"
cfgAccessToken = "access-token"
cfgRefreshToken = "refresh-token"
cfgExpiresIn = "expires-in"
cfgExpiresOn = "expires-on"
cfgEnvironment = "environment"
cfgApiserverID = "apiserver-id"
cfgConfigMode = "config-mode"
configModeDefault configMode = 0
configModeOmitSPNPrefix configMode = 1
)
func init() {
if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil {
klog.Fatalf("Failed to register azure auth plugin: %v", err)
}
}
var cache = newAzureTokenCache()
type azureTokenCache struct {
lock sync.Mutex
cache map[string]*azureToken
}
func newAzureTokenCache() *azureTokenCache {
return &azureTokenCache{cache: make(map[string]*azureToken)}
}
func (c *azureTokenCache) getToken(tokenKey string) *azureToken {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache[tokenKey]
}
func (c *azureTokenCache) setToken(tokenKey string, token *azureToken) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache[tokenKey] = token
}
var warnOnce sync.Once
func newAzureAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
// deprecated in v1.22, remove in v1.25
warnOnce.Do(func() {
klog.Warningf(`WARNING: the azure auth plugin is deprecated in v1.22+, unavailable in v1.26+; use https://github.com/Azure/kubelogin instead.
To learn more, consult https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins`)
})
var (
ts tokenSource
environment azure.Environment
err error
mode configMode
)
environment, err = azure.EnvironmentFromName(cfg[cfgEnvironment])
if err != nil {
environment = azure.PublicCloud
}
mode = configModeDefault
if cfg[cfgConfigMode] != "" {
configModeInt, err := strconv.Atoi(cfg[cfgConfigMode])
if err != nil {
return nil, fmt.Errorf("failed to parse %s, error: %s", cfgConfigMode, err)
}
mode = configMode(configModeInt)
switch mode {
case configModeOmitSPNPrefix:
case configModeDefault:
default:
return nil, fmt.Errorf("%s:%s is not a valid mode", cfgConfigMode, cfg[cfgConfigMode])
}
}
ts, err = newAzureTokenSourceDeviceCode(environment, cfg[cfgClientID], cfg[cfgTenantID], cfg[cfgApiserverID], mode)
if err != nil {
return nil, fmt.Errorf("creating a new azure token source for device code authentication: %v", err)
}
cacheSource := newAzureTokenSource(ts, cache, cfg, mode, persister)
return &azureAuthProvider{
tokenSource: cacheSource,
}, nil
}
type azureAuthProvider struct {
tokenSource tokenSource
}
func (p *azureAuthProvider) Login() error {
return errors.New("not yet implemented")
}
func (p *azureAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
return &azureRoundTripper{
tokenSource: p.tokenSource,
roundTripper: rt,
}
}
type azureRoundTripper struct {
tokenSource tokenSource
roundTripper http.RoundTripper
}
var _ net.RoundTripperWrapper = &azureRoundTripper{}
func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get(authHeader)) != 0 {
return r.roundTripper.RoundTrip(req)
}
token, err := r.tokenSource.Token()
if err != nil {
klog.Errorf("Failed to acquire a token: %v", err)
return nil, fmt.Errorf("acquiring a token for authorization header: %v", err)
}
// clone the request in order to avoid modifying the headers of the original request
req2 := new(http.Request)
*req2 = *req
req2.Header = make(http.Header, len(req.Header))
for k, s := range req.Header {
req2.Header[k] = append([]string(nil), s...)
}
req2.Header.Set(authHeader, fmt.Sprintf("%s %s", tokenType, token.token.AccessToken))
return r.roundTripper.RoundTrip(req2)
}
func (r *azureRoundTripper) WrappedRoundTripper() http.RoundTripper { return r.roundTripper }
type azureToken struct {
token adal.Token
environment string
clientID string
tenantID string
apiserverID string
}
type tokenSource interface {
Token() (*azureToken, error)
Refresh(*azureToken) (*azureToken, error)
}
type azureTokenSource struct {
source tokenSource
cache *azureTokenCache
lock sync.Mutex
configMode configMode
cfg map[string]string
persister restclient.AuthProviderConfigPersister
}
func newAzureTokenSource(source tokenSource, cache *azureTokenCache, cfg map[string]string, configMode configMode, persister restclient.AuthProviderConfigPersister) tokenSource {
return &azureTokenSource{
source: source,
cache: cache,
cfg: cfg,
persister: persister,
configMode: configMode,
}
}
// Token fetches a token from the cache of configuration if present otherwise
// acquires a new token from the configured source. Automatically refreshes
// the token if expired.
func (ts *azureTokenSource) Token() (*azureToken, error) {
ts.lock.Lock()
defer ts.lock.Unlock()
var err error
token := ts.cache.getToken(azureTokenKey)
if token != nil && !token.token.IsExpired() {
return token, nil
}
// retrieve from config if no cache
if token == nil {
tokenFromCfg, err := ts.retrieveTokenFromCfg()
if err == nil {
token = tokenFromCfg
}
}
if token != nil {
// cache and return if the token is as good
// avoids frequent persistor calls
if !token.token.IsExpired() {
ts.cache.setToken(azureTokenKey, token)
return token, nil
}
klog.V(4).Info("Refreshing token.")
tokenFromRefresh, err := ts.Refresh(token)
switch {
case err == nil:
token = tokenFromRefresh
case autorest.IsTokenRefreshError(err):
klog.V(4).Infof("Failed to refresh expired token, proceed to auth: %v", err)
// reset token to nil so that the token source will be used to acquire new
token = nil
default:
return nil, fmt.Errorf("unexpected error when refreshing token: %v", err)
}
}
if token == nil {
tokenFromSource, err := ts.source.Token()
if err != nil {
return nil, fmt.Errorf("failed acquiring new token: %v", err)
}
token = tokenFromSource
}
// sanity check
if token == nil {
return nil, fmt.Errorf("unable to acquire token")
}
// corner condition, newly got token is valid but expired
if token.token.IsExpired() {
return nil, fmt.Errorf("newly acquired token is expired")
}
err = ts.storeTokenInCfg(token)
if err != nil {
return nil, fmt.Errorf("storing the refreshed token in configuration: %v", err)
}
ts.cache.setToken(azureTokenKey, token)
return token, nil
}
func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) {
accessToken := ts.cfg[cfgAccessToken]
if accessToken == "" {
return nil, fmt.Errorf("no access token in cfg: %s", cfgAccessToken)
}
refreshToken := ts.cfg[cfgRefreshToken]
if refreshToken == "" {
return nil, fmt.Errorf("no refresh token in cfg: %s", cfgRefreshToken)
}
environment := ts.cfg[cfgEnvironment]
if environment == "" {
return nil, fmt.Errorf("no environment in cfg: %s", cfgEnvironment)
}
clientID := ts.cfg[cfgClientID]
if clientID == "" {
return nil, fmt.Errorf("no client ID in cfg: %s", cfgClientID)
}
tenantID := ts.cfg[cfgTenantID]
if tenantID == "" {
return nil, fmt.Errorf("no tenant ID in cfg: %s", cfgTenantID)
}
resourceID := ts.cfg[cfgApiserverID]
if resourceID == "" {
return nil, fmt.Errorf("no apiserver ID in cfg: %s", cfgApiserverID)
}
expiresIn := ts.cfg[cfgExpiresIn]
if expiresIn == "" {
return nil, fmt.Errorf("no expiresIn in cfg: %s", cfgExpiresIn)
}
expiresOn := ts.cfg[cfgExpiresOn]
if expiresOn == "" {
return nil, fmt.Errorf("no expiresOn in cfg: %s", cfgExpiresOn)
}
tokenAudience := resourceID
if ts.configMode == configModeDefault {
tokenAudience = fmt.Sprintf("spn:%s", resourceID)
}
return &azureToken{
token: adal.Token{
AccessToken: accessToken,
RefreshToken: refreshToken,
ExpiresIn: json.Number(expiresIn),
ExpiresOn: json.Number(expiresOn),
NotBefore: json.Number(expiresOn),
Resource: tokenAudience,
Type: tokenType,
},
environment: environment,
clientID: clientID,
tenantID: tenantID,
apiserverID: resourceID,
}, nil
}
func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error {
newCfg := make(map[string]string)
newCfg[cfgAccessToken] = token.token.AccessToken
newCfg[cfgRefreshToken] = token.token.RefreshToken
newCfg[cfgEnvironment] = token.environment
newCfg[cfgClientID] = token.clientID
newCfg[cfgTenantID] = token.tenantID
newCfg[cfgApiserverID] = token.apiserverID
newCfg[cfgExpiresIn] = string(token.token.ExpiresIn)
newCfg[cfgExpiresOn] = string(token.token.ExpiresOn)
newCfg[cfgConfigMode] = strconv.Itoa(int(ts.configMode))
err := ts.persister.Persist(newCfg)
if err != nil {
return fmt.Errorf("persisting the configuration: %v", err)
}
ts.cfg = newCfg
return nil
}
func (ts *azureTokenSource) Refresh(token *azureToken) (*azureToken, error) {
return ts.source.Refresh(token)
}
// refresh outdated token with adal.
func (ts *azureTokenSourceDeviceCode) Refresh(token *azureToken) (*azureToken, error) {
env, err := azure.EnvironmentFromName(token.environment)
if err != nil {
return nil, err
}
var oauthConfig *adal.OAuthConfig
if ts.configMode == configModeOmitSPNPrefix {
oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, token.tenantID, nil)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration without api-version for token refresh: %v", err)
}
} else {
oauthConfig, err = adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, token.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err)
}
}
callback := func(t adal.Token) error {
return nil
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
*oauthConfig,
token.clientID,
token.apiserverID,
token.token,
callback)
if err != nil {
return nil, fmt.Errorf("creating new service principal for token refresh: %v", err)
}
if err := spt.Refresh(); err != nil {
// Caller expects IsTokenRefreshError(err) to trigger prompt.
return nil, fmt.Errorf("refreshing token: %w", err)
}
return &azureToken{
token: spt.Token(),
environment: token.environment,
clientID: token.clientID,
tenantID: token.tenantID,
apiserverID: token.apiserverID,
}, nil
}
type azureTokenSourceDeviceCode struct {
environment azure.Environment
clientID string
tenantID string
apiserverID string
configMode configMode
}
func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID string, tenantID string, apiserverID string, configMode configMode) (tokenSource, error) {
if clientID == "" {
return nil, errors.New("client-id is empty")
}
if tenantID == "" {
return nil, errors.New("tenant-id is empty")
}
if apiserverID == "" {
return nil, errors.New("apiserver-id is empty")
}
return &azureTokenSourceDeviceCode{
environment: environment,
clientID: clientID,
tenantID: tenantID,
apiserverID: apiserverID,
configMode: configMode,
}, nil
}
func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) {
var (
oauthConfig *adal.OAuthConfig
err error
)
if ts.configMode == configModeOmitSPNPrefix {
oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(ts.environment.ActiveDirectoryEndpoint, ts.tenantID, nil)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration without api-version for device code authentication: %v", err)
}
} else {
oauthConfig, err = adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err)
}
}
client := &autorest.Client{}
deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID)
if err != nil {
return nil, fmt.Errorf("initialing the device code authentication: %v", err)
}
_, err = fmt.Fprintln(os.Stderr, *deviceCode.Message)
if err != nil {
return nil, fmt.Errorf("prompting the device code message: %v", err)
}
token, err := adal.WaitForUserCompletion(client, deviceCode)
if err != nil {
return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err)
}
return &azureToken{
token: *token,
environment: ts.environment.Name,
clientID: ts.clientID,
tenantID: ts.tenantID,
apiserverID: ts.apiserverID,
}, nil
}

View File

@@ -1,36 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"errors"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
func init() {
if err := rest.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil {
klog.Fatalf("Failed to register azure auth plugin: %v", err)
}
}
func newAzureAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
return nil, errors.New(`The azure auth plugin has been removed.
Please use the https://github.com/Azure/kubelogin kubectl/client-go credential plugin instead.
See https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins for further details`)
}

View File

@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- cjcullen
reviewers:
- cjcullen
emeritus_approvers:
- jlowdermilk

View File

@@ -0,0 +1,389 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os/exec"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/yaml"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/jsonpath"
"k8s.io/klog/v2"
)
func init() {
if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
klog.Fatalf("Failed to register gcp auth plugin: %v", err)
}
}
var (
// Stubbable for testing
execCommand = exec.Command
// defaultScopes:
// - cloud-platform is the base scope to authenticate to GCP.
// - userinfo.email is used to authenticate to GKE APIs with gserviceaccount
// email instead of numeric uniqueID.
defaultScopes = []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/userinfo.email"}
)
// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide
// tokens for kubectl to authenticate itself to the apiserver. A sample json config
// is provided below with all recognized options described.
//
// {
// 'auth-provider': {
// # Required
// "name": "gcp",
//
// 'config': {
// # Authentication options
// # These options are used while getting a token.
//
// # comma-separated list of GCP API scopes. default value of this field
// # is "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email".
// # to override the API scopes, specify this field explicitly.
// "scopes": "https://www.googleapis.com/auth/cloud-platform"
//
// # Caching options
//
// # Raw string data representing cached access token.
// "access-token": "ya29.CjWdA4GiBPTt",
// # RFC3339Nano expiration timestamp for cached access token.
// "expiry": "2016-10-31 22:31:9.123",
//
// # Command execution options
// # These options direct the plugin to execute a specified command and parse
// # token and expiry time from the output of the command.
//
// # Command to execute for access token. Command output will be parsed as JSON.
// # If "cmd-args" is not present, this value will be split on whitespace, with
// # the first element interpreted as the command, remaining elements as args.
// "cmd-path": "/usr/bin/gcloud",
//
// # Arguments to pass to command to execute for access token.
// "cmd-args": "config config-helper --output=json"
//
// # JSONPath to the string field that represents the access token in
// # command output. If omitted, defaults to "{.access_token}".
// "token-key": "{.credential.access_token}",
//
// # JSONPath to the string field that represents expiration timestamp
// # of the access token in the command output. If omitted, defaults to
// # "{.token_expiry}"
// "expiry-key": ""{.credential.token_expiry}",
//
// # golang reference time in the format that the expiration timestamp uses.
// # If omitted, defaults to time.RFC3339Nano
// "time-fmt": "2006-01-02 15:04:05.999999999"
// }
// }
// }
type gcpAuthProvider struct {
tokenSource oauth2.TokenSource
persister restclient.AuthProviderConfigPersister
}
var warnOnce sync.Once
func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
warnOnce.Do(func() {
klog.Warningf(`WARNING: the gcp auth plugin is deprecated in v1.22+, unavailable in v1.26+; use gcloud instead.
To learn more, consult https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke`)
})
ts, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig)
if err != nil {
return nil, err
}
cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig)
if err != nil {
return nil, err
}
return &gcpAuthProvider{cts, persister}, nil
}
func isCmdTokenSource(gcpConfig map[string]string) bool {
_, ok := gcpConfig["cmd-path"]
return ok
}
func tokenSource(isCmd bool, gcpConfig map[string]string) (oauth2.TokenSource, error) {
// Command-based token source
if isCmd {
cmd := gcpConfig["cmd-path"]
if len(cmd) == 0 {
return nil, fmt.Errorf("missing access token cmd")
}
if gcpConfig["scopes"] != "" {
return nil, fmt.Errorf("scopes can only be used when kubectl is using a gcp service account key")
}
var args []string
if cmdArgs, ok := gcpConfig["cmd-args"]; ok {
args = strings.Fields(cmdArgs)
} else {
fields := strings.Fields(cmd)
cmd = fields[0]
args = fields[1:]
}
return newCmdTokenSource(cmd, args, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]), nil
}
// Google Application Credentials-based token source
scopes := parseScopes(gcpConfig)
ts, err := google.DefaultTokenSource(context.Background(), scopes...)
if err != nil {
return nil, fmt.Errorf("cannot construct google default token source: %v", err)
}
return ts, nil
}
// parseScopes constructs a list of scopes that should be included in token source
// from the config map.
func parseScopes(gcpConfig map[string]string) []string {
scopes, ok := gcpConfig["scopes"]
if !ok {
return defaultScopes
}
if scopes == "" {
return []string{}
}
return strings.Split(gcpConfig["scopes"], ",")
}
func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
var resetCache map[string]string
if cts, ok := g.tokenSource.(*cachedTokenSource); ok {
resetCache = cts.baseCache()
} else {
resetCache = make(map[string]string)
}
return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister, resetCache}
}
func (g *gcpAuthProvider) Login() error { return nil }
type cachedTokenSource struct {
lk sync.Mutex
source oauth2.TokenSource
accessToken string `datapolicy:"token"`
expiry time.Time
persister restclient.AuthProviderConfigPersister
cache map[string]string
}
func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) {
var expiryTime time.Time
if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
expiryTime = parsedTime
}
if cache == nil {
cache = make(map[string]string)
}
return &cachedTokenSource{
source: ts,
accessToken: accessToken,
expiry: expiryTime,
persister: persister,
cache: cache,
}, nil
}
func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
tok := t.cachedToken()
if tok.Valid() && !tok.Expiry.IsZero() {
return tok, nil
}
tok, err := t.source.Token()
if err != nil {
return nil, err
}
cache := t.update(tok)
if t.persister != nil {
if err := t.persister.Persist(cache); err != nil {
klog.V(4).Infof("Failed to persist token: %v", err)
}
}
return tok, nil
}
func (t *cachedTokenSource) cachedToken() *oauth2.Token {
t.lk.Lock()
defer t.lk.Unlock()
return &oauth2.Token{
AccessToken: t.accessToken,
TokenType: "Bearer",
Expiry: t.expiry,
}
}
func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
t.lk.Lock()
defer t.lk.Unlock()
t.accessToken = tok.AccessToken
t.expiry = tok.Expiry
ret := map[string]string{}
for k, v := range t.cache {
ret[k] = v
}
ret["access-token"] = t.accessToken
ret["expiry"] = t.expiry.Format(time.RFC3339Nano)
return ret
}
// baseCache is the base configuration value for this TokenSource, without any cached ephemeral tokens.
func (t *cachedTokenSource) baseCache() map[string]string {
t.lk.Lock()
defer t.lk.Unlock()
ret := map[string]string{}
for k, v := range t.cache {
ret[k] = v
}
delete(ret, "access-token")
delete(ret, "expiry")
return ret
}
type commandTokenSource struct {
cmd string
args []string
tokenKey string `datapolicy:"token"`
expiryKey string `datapolicy:"secret-key"`
timeFmt string
}
func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource {
if len(timeFmt) == 0 {
timeFmt = time.RFC3339Nano
}
if len(tokenKey) == 0 {
tokenKey = "{.access_token}"
}
if len(expiryKey) == 0 {
expiryKey = "{.token_expiry}"
}
return &commandTokenSource{
cmd: cmd,
args: args,
tokenKey: tokenKey,
expiryKey: expiryKey,
timeFmt: timeFmt,
}
}
func (c *commandTokenSource) Token() (*oauth2.Token, error) {
fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ")
cmd := execCommand(c.cmd, c.args...)
var stderr bytes.Buffer
cmd.Stderr = &stderr
output, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, string(stderr.Bytes()))
}
token, err := c.parseTokenCmdOutput(output)
if err != nil {
return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err)
}
return token, nil
}
func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) {
output, err := yaml.ToJSON(output)
if err != nil {
return nil, err
}
var data interface{}
if err := json.Unmarshal(output, &data); err != nil {
return nil, err
}
accessToken, err := parseJSONPath(data, "token-key", c.tokenKey)
if err != nil {
return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(output), err)
}
expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey)
if err != nil {
return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(output), err)
}
var expiry time.Time
if t, err := time.Parse(c.timeFmt, expiryStr); err != nil {
klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err)
} else {
expiry = t
}
return &oauth2.Token{
AccessToken: accessToken,
TokenType: "Bearer",
Expiry: expiry,
}, nil
}
func parseJSONPath(input interface{}, name, template string) (string, error) {
j := jsonpath.New(name)
buf := new(bytes.Buffer)
if err := j.Parse(template); err != nil {
return "", err
}
if err := j.Execute(buf, input); err != nil {
return "", err
}
return buf.String(), nil
}
type conditionalTransport struct {
oauthTransport *oauth2.Transport
persister restclient.AuthProviderConfigPersister
resetCache map[string]string
}
var _ net.RoundTripperWrapper = &conditionalTransport{}
func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get("Authorization")) != 0 {
return t.oauthTransport.Base.RoundTrip(req)
}
res, err := t.oauthTransport.RoundTrip(req)
if err != nil {
return nil, err
}
if res.StatusCode == 401 {
klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster")
t.persister.Persist(t.resetCache)
}
return res, nil
}
func (t *conditionalTransport) WrappedRoundTripper() http.RoundTripper { return t.oauthTransport.Base }
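
The large comment near the top of gcp.go documents the recognized config keys. A small sketch of the equivalent `map[string]string` a kubeconfig would hand to the plugin for a command-based token source; the values are hypothetical.

```go
package main

import "fmt"

func main() {
	// Hypothetical config mirroring the documented keys; the presence of
	// "cmd-path" is what makes isCmdTokenSource choose the command path.
	gcpConfig := map[string]string{
		"cmd-path":   "/usr/bin/gcloud",
		"cmd-args":   "config config-helper --output=json",
		"token-key":  "{.credential.access_token}",
		"expiry-key": "{.credential.token_expiry}",
	}
	_, isCmd := gcpConfig["cmd-path"]
	fmt.Println(isCmd) // true: a command-based token source would be built
}
```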

View File

@@ -1,36 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"errors"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
func init() {
if err := rest.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
klog.Fatalf("Failed to register gcp auth plugin: %v", err)
}
}
func newGCPAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
return nil, errors.New(`The gcp auth plugin has been removed.
Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead.
See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details`)
}

View File

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@@ -0,0 +1,52 @@
//This package is copied from Go library text/template.
//The original private functions indirect and printableValue
//are exported as public functions.
package template
import (
"fmt"
"reflect"
)
var (
errorType = reflect.TypeOf((*error)(nil)).Elem()
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)
// Indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func Indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() {
if v.IsNil() {
return v, true
}
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
break
}
}
return v, false
}
// PrintableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func PrintableValue(v reflect.Value) (interface{}, bool) {
if v.Kind() == reflect.Pointer {
v, _ = Indirect(v) // fmt.Fprint handles nil.
}
if !v.IsValid() {
return "<no value>", true
}
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
if v.CanAddr() && (reflect.PointerTo(v.Type()).Implements(errorType) || reflect.PointerTo(v.Type()).Implements(fmtStringerType)) {
v = v.Addr()
} else {
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil, false
}
}
}
return v.Interface(), true
}
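
A brief sketch of the two exported helpers in use, assuming the vendored import path shown later in jsonpath.go:

```go
package main

import (
	"fmt"
	"reflect"

	"k8s.io/client-go/third_party/forked/golang/template"
)

func main() {
	s := "hello"
	pp := &s

	// Indirect follows **string all the way down to the string value.
	v, isNil := template.Indirect(reflect.ValueOf(&pp))
	fmt.Println(v.Interface(), isNil) // hello false

	// PrintableValue dereferences a pointer and reports it can be printed.
	printable, ok := template.PrintableValue(reflect.ValueOf(&s))
	fmt.Println(printable, ok) // hello true
}
```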

View File

@@ -0,0 +1,177 @@
//This package is copied from Go library text/template.
//The original private functions eq, ge, gt, le, lt, and ne
//are exported as public functions.
package template
import (
"errors"
"reflect"
)
var (
errBadComparisonType = errors.New("invalid type for comparison")
errBadComparison = errors.New("incompatible types for comparison")
errNoComparison = errors.New("missing argument for comparison")
)
type kind int
const (
invalidKind kind = iota
boolKind
complexKind
intKind
floatKind
integerKind
stringKind
uintKind
)
func basicKind(v reflect.Value) (kind, error) {
switch v.Kind() {
case reflect.Bool:
return boolKind, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intKind, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintKind, nil
case reflect.Float32, reflect.Float64:
return floatKind, nil
case reflect.Complex64, reflect.Complex128:
return complexKind, nil
case reflect.String:
return stringKind, nil
}
return invalidKind, errBadComparisonType
}
// Equal evaluates the comparison a == b || a == c || ...
func Equal(arg1 interface{}, arg2 ...interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
if len(arg2) == 0 {
return false, errNoComparison
}
for _, arg := range arg2 {
v2 := reflect.ValueOf(arg)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind:
truth = v1.Bool() == v2.Bool()
case complexKind:
truth = v1.Complex() == v2.Complex()
case floatKind:
truth = v1.Float() == v2.Float()
case intKind:
truth = v1.Int() == v2.Int()
case stringKind:
truth = v1.String() == v2.String()
case uintKind:
truth = v1.Uint() == v2.Uint()
default:
panic("invalid kind")
}
}
if truth {
return true, nil
}
}
return false, nil
}
// NotEqual evaluates the comparison a != b.
func NotEqual(arg1, arg2 interface{}) (bool, error) {
// != is the inverse of ==.
equal, err := Equal(arg1, arg2)
return !equal, err
}
// Less evaluates the comparison a < b.
func Less(arg1, arg2 interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
v2 := reflect.ValueOf(arg2)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind, complexKind:
return false, errBadComparisonType
case floatKind:
truth = v1.Float() < v2.Float()
case intKind:
truth = v1.Int() < v2.Int()
case stringKind:
truth = v1.String() < v2.String()
case uintKind:
truth = v1.Uint() < v2.Uint()
default:
panic("invalid kind")
}
}
return truth, nil
}
// LessEqual evaluates the comparison <= b.
func LessEqual(arg1, arg2 interface{}) (bool, error) {
// <= is < or ==.
lessThan, err := Less(arg1, arg2)
if lessThan || err != nil {
return lessThan, err
}
return Equal(arg1, arg2)
}
// Greater evaluates the comparison a > b.
func Greater(arg1, arg2 interface{}) (bool, error) {
// > is the inverse of <=.
lessOrEqual, err := LessEqual(arg1, arg2)
if err != nil {
return false, err
}
return !lessOrEqual, nil
}
// GreaterEqual evaluates the comparison a >= b.
func GreaterEqual(arg1, arg2 interface{}) (bool, error) {
// >= is the inverse of <.
lessThan, err := Less(arg1, arg2)
if err != nil {
return false, err
}
return !lessThan, nil
}
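
A short usage sketch of the exported comparison helpers, including the mixed-signedness special case handled above:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/third_party/forked/golang/template"
)

func main() {
	// Signed vs. unsigned integers are compared by value, not by type.
	eq, err := template.Equal(int64(3), uint64(3))
	fmt.Println(eq, err) // true <nil>

	lt, err := template.Less("abc", "abd")
	fmt.Println(lt, err) // true <nil>

	// Incompatible kinds yield an error instead of panicking.
	_, err = template.Greater(1, "one")
	fmt.Println(err) // incompatible types for comparison
}
```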

20
vendor/k8s.io/client-go/util/jsonpath/doc.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// package jsonpath is a template engine using jsonpath syntax,
// which can be seen at http://goessner.net/articles/JsonPath/.
// In addition, it has {range} {end} function to iterate list and slice.
package jsonpath // import "k8s.io/client-go/util/jsonpath"
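
A minimal example of the engine described here, using the API defined in jsonpath.go below and the `{range}`/`{end}` iteration the doc comment mentions (the sample data is made up):

```go
package main

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Made-up data shaped like an unmarshalled Kubernetes list.
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "pod-a"},
			map[string]interface{}{"name": "pod-b"},
		},
	}

	jp := jsonpath.New("example").AllowMissingKeys(true)
	if err := jp.Parse(`{range .items[*]}{.name}{"\n"}{end}`); err != nil {
		panic(err)
	}
	// Prints "pod-a" and "pod-b", one per line.
	if err := jp.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```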

579
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go generated vendored Normal file
View File

@@ -0,0 +1,579 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strings"
"k8s.io/client-go/third_party/forked/golang/template"
)
type JSONPath struct {
name string
parser *Parser
beginRange int
inRange int
endRange int
lastEndNode *Node
allowMissingKeys bool
outputJSON bool
}
// New creates a new JSONPath with the given name.
func New(name string) *JSONPath {
return &JSONPath{
name: name,
beginRange: 0,
inRange: 0,
endRange: 0,
}
}
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
// cannot be located, or simply an empty result. The receiver is returned for chaining.
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
j.allowMissingKeys = allow
return j
}
// Parse parses the given template and returns an error.
func (j *JSONPath) Parse(text string) error {
var err error
j.parser, err = Parse(j.name, text)
return err
}
// Execute binds data into the template and writes the result to wr.
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
fullResults, err := j.FindResults(data)
if err != nil {
return err
}
for ix := range fullResults {
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
return err
}
}
return nil
}
// FindResults evaluates the parsed template against data and returns the
// matching values, one result set per top-level template node.
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
if j.parser == nil {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
cur := []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(cur, node)
if err != nil {
return nil, err
}
// encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange--
j.lastEndNode = &nodes[i]
break
}
// encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange--
j.inRange++
if len(results) > 0 {
for _, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
} else {
// If the range has no results, we still need to process the nodes within the range
// so the position will advance to the end node
j.parser.Root.Nodes = nodes[i+1:]
_, err := j.FindResults(nil)
if err != nil {
return nil, err
}
}
j.inRange--
// Fast forward to resume processing after the most recent end node that was encountered
for k := i + 1; k < len(nodes); k++ {
if &nodes[k] == j.lastEndNode {
i = k
break
}
}
continue
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
func (j *JSONPath) EnableJSONOutput(v bool) {
j.outputJSON = v
}
// PrintResults writes the results to the given writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
if j.outputJSON {
// convert the []reflect.Value to something that json
// will be able to marshal
r := make([]interface{}, 0, len(results))
for i := range results {
r = append(r, results[i].Interface())
}
results = []reflect.Value{reflect.ValueOf(r)}
}
for i, r := range results {
var text []byte
var err error
outputJSON := true
kind := r.Kind()
if kind == reflect.Interface {
kind = r.Elem().Kind()
}
switch kind {
case reflect.Map:
case reflect.Array:
case reflect.Slice:
case reflect.Struct:
default:
outputJSON = false
}
switch {
case outputJSON || j.outputJSON:
if j.outputJSON {
text, err = json.MarshalIndent(r.Interface(), "", " ")
text = append(text, '\n')
} else {
text, err = json.Marshal(r.Interface())
}
default:
text, err = j.evalToText(r)
}
if err != nil {
return err
}
if i != len(results)-1 {
text = append(text, ' ')
}
if _, err = wr.Write(text); err != nil {
return err
}
}
return nil
}
// walk visits the tree rooted at the given node in DFS order
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
switch node := node.(type) {
case *ListNode:
return j.evalList(value, node)
case *TextNode:
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
case *FieldNode:
return j.evalField(value, node)
case *ArrayNode:
return j.evalArray(value, node)
case *FilterNode:
return j.evalFilter(value, node)
case *IntNode:
return j.evalInt(value, node)
case *BoolNode:
return j.evalBool(value, node)
case *FloatNode:
return j.evalFloat(value, node)
case *WildcardNode:
return j.evalWildcard(value, node)
case *RecursiveNode:
return j.evalRecursive(value, node)
case *UnionNode:
return j.evalUnion(value, node)
case *IdentifierNode:
return j.evalIdentifier(value, node)
default:
return value, fmt.Errorf("unexpected Node %v", node)
}
}
// evalInt evaluates IntNode
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalFloat evaluates FloatNode
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalBool evaluates BoolNode
func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalList evaluates ListNode
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
var err error
curValue := value
for _, node := range node.Nodes {
curValue, err = j.walk(curValue, node)
if err != nil {
return curValue, err
}
}
return curValue, nil
}
// evalIdentifier evaluates IdentifierNode
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
results := []reflect.Value{}
switch node.Name {
case "range":
j.beginRange++
results = input
case "end":
if j.inRange > 0 {
j.endRange++
} else {
return results, fmt.Errorf("not in range, nothing to end")
}
default:
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
}
return results, nil
}
// evalArray evaluates ArrayNode
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value.Type())
}
params := node.Params
if !params[0].Known {
params[0].Value = 0
}
if params[0].Value < 0 {
params[0].Value += value.Len()
}
if !params[1].Known {
params[1].Value = value.Len()
}
if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) {
params[1].Value += value.Len()
}
sliceLength := value.Len()
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
if params[0].Value >= sliceLength || params[0].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
}
if params[1].Value > sliceLength || params[1].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
}
if params[0].Value > params[1].Value {
return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value)
}
} else {
return result, nil
}
value = value.Slice(params[0].Value, params[1].Value)
step := 1
if params[2].Known {
if params[2].Value <= 0 {
return input, fmt.Errorf("step must be > 0")
}
step = params[2].Value
}
for i := 0; i < value.Len(); i += step {
result = append(result, value.Index(i))
}
}
return result, nil
}
// evalUnion evaluates UnionNode
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, listNode := range node.Nodes {
temp, err := j.evalList(input, listNode)
if err != nil {
return input, err
}
result = append(result, temp...)
}
return result, nil
}
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
t := value.Type()
var inlineValue *reflect.Value
for ix := 0; ix < t.NumField(); ix++ {
f := t.Field(ix)
jsonTag := f.Tag.Get("json")
parts := strings.Split(jsonTag, ",")
if len(parts) == 0 {
continue
}
if parts[0] == node.Value {
return value.Field(ix), nil
}
if len(parts[0]) == 0 {
val := value.Field(ix)
inlineValue = &val
}
}
if inlineValue != nil {
if inlineValue.Kind() == reflect.Struct {
// handle 'inline'
match, err := j.findFieldInValue(inlineValue, node)
if err != nil {
return reflect.Value{}, err
}
if match.IsValid() {
return match, nil
}
}
}
return value.FieldByName(node.Value), nil
}
// evalField evaluates field of struct or key of map.
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
results := []reflect.Value{}
// If there's no input, there's no output
if len(input) == 0 {
return results, nil
}
for _, value := range input {
var result reflect.Value
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() == reflect.Struct {
var err error
if result, err = j.findFieldInValue(&value, node); err != nil {
return nil, err
}
} else if value.Kind() == reflect.Map {
mapKeyType := value.Type().Key()
nodeValue := reflect.ValueOf(node.Value)
// node value type must be convertible to map key type
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
}
result = value.MapIndex(nodeValue.Convert(mapKeyType))
}
if result.IsValid() {
results = append(results, result)
}
}
if len(results) == 0 {
if j.allowMissingKeys {
return results, nil
}
return results, fmt.Errorf("%s is not found", node.Value)
}
return results, nil
}
// evalWildcard extracts all contents of the given value
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalRecursive visits the given values recursively and pushes every visited value onto the result
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
results := []reflect.Value{}
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
if len(results) != 0 {
result = append(result, value)
output, err := j.evalRecursive(results, node)
if err != nil {
return result, err
}
result = append(result, output...)
}
}
return result, nil
}
// evalFilter filters array according to FilterNode
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, _ = template.Indirect(value)
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
}
for i := 0; i < value.Len(); i++ {
temp := []reflect.Value{value.Index(i)}
lefts, err := j.evalList(temp, node.Left)
// the exists operator has no right-hand operand; it passes when the left expression yields any result
if node.Operator == "exists" {
if len(lefts) > 0 {
results = append(results, value.Index(i))
}
continue
}
if err != nil {
return input, err
}
var left, right interface{}
switch {
case len(lefts) == 0:
continue
case len(lefts) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
left = lefts[0].Interface()
rights, err := j.evalList(temp, node.Right)
if err != nil {
return input, err
}
switch {
case len(rights) == 0:
continue
case len(rights) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
right = rights[0].Interface()
pass := false
switch node.Operator {
case "<":
pass, err = template.Less(left, right)
case ">":
pass, err = template.Greater(left, right)
case "==":
pass, err = template.Equal(left, right)
case "!=":
pass, err = template.NotEqual(left, right)
case "<=":
pass, err = template.LessEqual(left, right)
case ">=":
pass, err = template.GreaterEqual(left, right)
default:
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
}
if err != nil {
return results, err
}
if pass {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalToText translates reflect value to corresponding text
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
iface, ok := template.PrintableValue(v)
if !ok {
return nil, fmt.Errorf("can't print type %s", v.Type())
}
var buffer bytes.Buffer
fmt.Fprint(&buffer, iface)
return buffer.Bytes(), nil
}
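
Two behaviors of the type above are easy to miss: AllowMissingKeys turns "not found" errors into empty output, and EnableJSONOutput makes PrintResults emit an indented JSON array instead of space-separated text. A brief sketch:

package main

import (
	"bytes"
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
	}

	// Missing key: with AllowMissingKeys(true) the result is simply empty.
	jp := jsonpath.New("missing").AllowMissingKeys(true)
	if err := jp.Parse(`{.metadata.labels}`); err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := jp.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String()) // ""

	// JSON output: the results are wrapped in an indented JSON array.
	jp2 := jsonpath.New("json")
	jp2.EnableJSONOutput(true)
	if err := jp2.Parse(`{.metadata.name}`); err != nil {
		panic(err)
	}
	buf.Reset()
	if err := jp2.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // ["demo"] (indented, followed by a newline)
}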

256
vendor/k8s.io/client-go/util/jsonpath/node.go generated vendored Normal file
View File

@@ -0,0 +1,256 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import "fmt"
// NodeType identifies the type of a parse tree node.
type NodeType int
// Type returns itself and provides an easy default implementation
func (t NodeType) Type() NodeType {
return t
}
func (t NodeType) String() string {
return NodeTypeName[t]
}
const (
NodeText NodeType = iota
NodeArray
NodeList
NodeField
NodeIdentifier
NodeFilter
NodeInt
NodeFloat
NodeWildcard
NodeRecursive
NodeUnion
NodeBool
)
var NodeTypeName = map[NodeType]string{
NodeText: "NodeText",
NodeArray: "NodeArray",
NodeList: "NodeList",
NodeField: "NodeField",
NodeIdentifier: "NodeIdentifier",
NodeFilter: "NodeFilter",
NodeInt: "NodeInt",
NodeFloat: "NodeFloat",
NodeWildcard: "NodeWildcard",
NodeRecursive: "NodeRecursive",
NodeUnion: "NodeUnion",
NodeBool: "NodeBool",
}
type Node interface {
Type() NodeType
String() string
}
// ListNode holds a sequence of nodes.
type ListNode struct {
NodeType
Nodes []Node // The element nodes in lexical order.
}
func newList() *ListNode {
return &ListNode{NodeType: NodeList}
}
func (l *ListNode) append(n Node) {
l.Nodes = append(l.Nodes, n)
}
func (l *ListNode) String() string {
return l.Type().String()
}
// TextNode holds plain text.
type TextNode struct {
NodeType
Text string // The text; may span newlines.
}
func newText(text string) *TextNode {
return &TextNode{NodeType: NodeText, Text: text}
}
func (t *TextNode) String() string {
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
}
// FieldNode holds a field of a struct
type FieldNode struct {
NodeType
Value string
}
func newField(value string) *FieldNode {
return &FieldNode{NodeType: NodeField, Value: value}
}
func (f *FieldNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
}
// IdentifierNode holds an identifier
type IdentifierNode struct {
NodeType
Name string
}
func newIdentifier(value string) *IdentifierNode {
return &IdentifierNode{
NodeType: NodeIdentifier,
Name: value,
}
}
func (f *IdentifierNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
}
// ParamsEntry holds param information for ArrayNode
type ParamsEntry struct {
Value int
Known bool // whether the value is known at parse time
Derived bool
}
// ArrayNode holds start, end, step information for array index selection
type ArrayNode struct {
NodeType
Params [3]ParamsEntry // start, end, step
}
func newArray(params [3]ParamsEntry) *ArrayNode {
return &ArrayNode{
NodeType: NodeArray,
Params: params,
}
}
func (a *ArrayNode) String() string {
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
}
// FilterNode holds operand and operator information for filter
type FilterNode struct {
NodeType
Left *ListNode
Right *ListNode
Operator string
}
func newFilter(left, right *ListNode, operator string) *FilterNode {
return &FilterNode{
NodeType: NodeFilter,
Left: left,
Right: right,
Operator: operator,
}
}
func (f *FilterNode) String() string {
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
}
// IntNode holds integer value
type IntNode struct {
NodeType
Value int
}
func newInt(num int) *IntNode {
return &IntNode{NodeType: NodeInt, Value: num}
}
func (i *IntNode) String() string {
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
}
// FloatNode holds float value
type FloatNode struct {
NodeType
Value float64
}
func newFloat(num float64) *FloatNode {
return &FloatNode{NodeType: NodeFloat, Value: num}
}
func (i *FloatNode) String() string {
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
}
// WildcardNode represents a wildcard
type WildcardNode struct {
NodeType
}
func newWildcard() *WildcardNode {
return &WildcardNode{NodeType: NodeWildcard}
}
func (i *WildcardNode) String() string {
return i.Type().String()
}
// RecursiveNode represents the recursive descent operator
type RecursiveNode struct {
NodeType
}
func newRecursive() *RecursiveNode {
return &RecursiveNode{NodeType: NodeRecursive}
}
func (r *RecursiveNode) String() string {
return r.Type().String()
}
// UnionNode is a union of ListNodes
type UnionNode struct {
NodeType
Nodes []*ListNode
}
func newUnion(nodes []*ListNode) *UnionNode {
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
}
func (u *UnionNode) String() string {
return u.Type().String()
}
// BoolNode holds bool value
type BoolNode struct {
NodeType
Value bool
}
func newBool(value bool) *BoolNode {
return &BoolNode{NodeType: NodeBool, Value: value}
}
func (b *BoolNode) String() string {
return fmt.Sprintf("%s: %t", b.Type(), b.Value)
}
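
The node types above form the parse tree produced by the parser in the next file. A small sketch that dumps the children of the first action in a template; the String() output shown in the comments is indicative:

package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	p, err := jsonpath.Parse("demo", `{.items[0].name}`)
	if err != nil {
		panic(err)
	}
	// The root is a ListNode; each {...} action becomes a nested ListNode
	// whose children are FieldNode, ArrayNode, and so on.
	for _, n := range p.Root.Nodes {
		action, ok := n.(*jsonpath.ListNode)
		if !ok {
			continue
		}
		for _, child := range action.Nodes {
			fmt.Println(child.Type(), "|", child) // e.g. NodeField | NodeField: items
		}
	}
}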

527
vendor/k8s.io/client-go/util/jsonpath/parser.go generated vendored Normal file
View File

@@ -0,0 +1,527 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
const eof = -1
const (
leftDelim = "{"
rightDelim = "}"
)
type Parser struct {
Name string
Root *ListNode
input string
pos int
start int
width int
}
var (
ErrSyntax = errors.New("invalid syntax")
dictKeyRex = regexp.MustCompile(`^'([^']*)'$`)
sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`)
)
// Parse parses the given text and returns a Parser.
// If an error is encountered, parsing stops and a nil
// Parser is returned along with the error.
func Parse(name, text string) (*Parser, error) {
p := NewParser(name)
err := p.Parse(text)
if err != nil {
p = nil
}
return p, err
}
func NewParser(name string) *Parser {
return &Parser{
Name: name,
}
}
// parseAction parses the expression inside the delimiters
func parseAction(name, text string) (*Parser, error) {
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
// when error happens, p will be nil, so we need to return here
if err != nil {
return p, err
}
p.Root = p.Root.Nodes[0].(*ListNode)
return p, nil
}
func (p *Parser) Parse(text string) error {
p.input = text
p.Root = newList()
p.pos = 0
return p.parseText(p.Root)
}
// consumeText returns the text consumed since the last call to consumeText
func (p *Parser) consumeText() string {
value := p.input[p.start:p.pos]
p.start = p.pos
return value
}
// next returns the next rune in the input.
func (p *Parser) next() rune {
if p.pos >= len(p.input) {
p.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
p.width = w
p.pos += p.width
return r
}
// peek returns but does not consume the next rune in the input.
func (p *Parser) peek() rune {
r := p.next()
p.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (p *Parser) backup() {
p.pos -= p.width
}
func (p *Parser) parseText(cur *ListNode) error {
for {
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return p.parseLeftDelim(cur)
}
if p.next() == eof {
break
}
}
// Correctly reached EOF.
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return nil
}
// parseLeftDelim scans the left delimiter, which is known to be present.
func (p *Parser) parseLeftDelim(cur *ListNode) error {
p.pos += len(leftDelim)
p.consumeText()
newNode := newList()
cur.append(newNode)
cur = newNode
return p.parseInsideAction(cur)
}
func (p *Parser) parseInsideAction(cur *ListNode) error {
prefixMap := map[string]func(*ListNode) error{
rightDelim: p.parseRightDelim,
"[?(": p.parseFilter,
"..": p.parseRecursive,
}
for prefix, parseFunc := range prefixMap {
if strings.HasPrefix(p.input[p.pos:], prefix) {
return parseFunc(cur)
}
}
switch r := p.next(); {
case r == eof || isEndOfLine(r):
return fmt.Errorf("unclosed action")
case r == ' ':
p.consumeText()
case r == '@' || r == '$': //the current object, just pass it
p.consumeText()
case r == '[':
return p.parseArray(cur)
case r == '"' || r == '\'':
return p.parseQuote(cur, r)
case r == '.':
return p.parseField(cur)
case r == '+' || r == '-' || unicode.IsDigit(r):
p.backup()
return p.parseNumber(cur)
case isAlphaNumeric(r):
p.backup()
return p.parseIdentifier(cur)
default:
return fmt.Errorf("unrecognized character in action: %#U", r)
}
return p.parseInsideAction(cur)
}
// parseRightDelim scans the right delimiter, which is known to be present.
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
return p.parseText(p.Root)
}
// parseIdentifier scans built-in keywords such as "range" and "end"
func (p *Parser) parseIdentifier(cur *ListNode) error {
var r rune
for {
r = p.next()
if isTerminator(r) {
p.backup()
break
}
}
value := p.consumeText()
if isBool(value) {
v, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
}
cur.append(newBool(v))
} else {
cur.append(newIdentifier(value))
}
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive descent operator ..
func (p *Parser) parseRecursive(cur *ListNode) error {
if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
return fmt.Errorf("invalid multiple recursive descent")
}
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
if r := p.peek(); isAlphaNumeric(r) {
return p.parseField(cur)
}
return p.parseInsideAction(cur)
}
// parseNumber scans number
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
p.next()
}
for {
r = p.next()
if r != '.' && !unicode.IsDigit(r) {
p.backup()
break
}
}
value := p.consumeText()
i, err := strconv.Atoi(value)
if err == nil {
cur.append(newInt(i))
return p.parseInsideAction(cur)
}
d, err := strconv.ParseFloat(value, 64)
if err == nil {
cur.append(newFloat(d))
return p.parseInsideAction(cur)
}
return fmt.Errorf("cannot parse number %s", value)
}
// parseArray scans array index selection
func (p *Parser) parseArray(cur *ListNode) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated array")
case ']':
break Loop
}
}
text := p.consumeText()
text = text[1 : len(text)-1]
if text == "*" {
text = ":"
}
//union operator
strs := strings.Split(text, ",")
if len(strs) > 1 {
union := []*ListNode{}
for _, str := range strs {
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
if err != nil {
return err
}
union = append(union, parser.Root)
}
cur.append(newUnion(union))
return p.parseInsideAction(cur)
}
// dict key
value := dictKeyRex.FindStringSubmatch(text)
if value != nil {
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
if err != nil {
return err
}
for _, node := range parser.Root.Nodes {
cur.append(node)
}
return p.parseInsideAction(cur)
}
//slice operator
value = sliceOperatorRex.FindStringSubmatch(text)
if value == nil {
return fmt.Errorf("invalid array index %s", text)
}
value = value[1:]
params := [3]ParamsEntry{}
for i := 0; i < 3; i++ {
if value[i] != "" {
if i > 0 {
value[i] = value[i][1:]
}
if i > 0 && value[i] == "" {
params[i].Known = false
} else {
var err error
params[i].Known = true
params[i].Value, err = strconv.Atoi(value[i])
if err != nil {
return fmt.Errorf("array index %s is not a number", value[i])
}
}
} else {
if i == 1 {
params[i].Known = true
params[i].Value = params[0].Value + 1
params[i].Derived = true
} else {
params[i].Known = false
params[i].Value = 0
}
}
}
cur.append(newArray(params))
return p.parseInsideAction(cur)
}
// parseFilter scans filter inside array selection
func (p *Parser) parseFilter(cur *ListNode) error {
p.pos += len("[?(")
p.consumeText()
begin := false
end := false
var pair rune
Loop:
for {
r := p.next()
switch r {
case eof, '\n':
return fmt.Errorf("unterminated filter")
case '"', '\'':
if !begin {
//save the paired rune
begin = true
pair = r
continue
}
//only add when met paired rune
if p.input[p.pos-2] != '\\' && r == pair {
end = true
}
case ')':
//in rightParser below quotes only appear zero or once
//and must be paired at the beginning and end
if begin == end {
break Loop
}
}
}
if p.next() != ']' {
return fmt.Errorf("unclosed array expect ]")
}
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
text := p.consumeText()
text = text[:len(text)-2]
value := reg.FindStringSubmatch(text)
if value == nil {
parser, err := parseAction("text", text)
if err != nil {
return err
}
cur.append(newFilter(parser.Root, newList(), "exists"))
} else {
leftParser, err := parseAction("left", value[1])
if err != nil {
return err
}
rightParser, err := parseAction("right", value[3])
if err != nil {
return err
}
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
}
return p.parseInsideAction(cur)
}
// parseQuote unquotes a string inside double or single quotes
func (p *Parser) parseQuote(cur *ListNode, end rune) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated quoted string")
case end:
// break the Loop unless the quote is escaped
if p.input[p.pos-2] != '\\' {
break Loop
}
}
}
value := p.consumeText()
s, err := UnquoteExtend(value)
if err != nil {
return fmt.Errorf("unquote string %s error %v", value, err)
}
cur.append(newText(s))
return p.parseInsideAction(cur)
}
// parseField scans a field until a terminator
func (p *Parser) parseField(cur *ListNode) error {
p.consumeText()
for p.advance() {
}
value := p.consumeText()
if value == "*" {
cur.append(newWildcard())
} else {
cur.append(newField(strings.Replace(value, "\\", "", -1)))
}
return p.parseInsideAction(cur)
}
// advance scans until next non-escaped terminator
func (p *Parser) advance() bool {
r := p.next()
if r == '\\' {
p.next()
} else if isTerminator(r) {
p.backup()
return false
}
return true
}
// isTerminator reports whether the input is at a valid termination character to appear after an identifier.
func isTerminator(r rune) bool {
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isBool reports whether s is a boolean value.
func isBool(s string) bool {
return s == "true" || s == "false"
}
// UnquoteExtend is almost the same as strconv.Unquote(), but it also supports single quotes as string delimiters
func UnquoteExtend(s string) (string, error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' && quote != '\'' {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
return s, nil
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
}
return string(buf), nil
}
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
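
UnquoteExtend is what lets templates use single-quoted literals (e.g. {'text'}) in addition to the double-quoted form that strconv.Unquote accepts. A quick sketch:

package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Single quotes are accepted as string delimiters.
	s, err := jsonpath.UnquoteExtend(`'app.kubernetes.io/name'`)
	fmt.Println(s, err) // app.kubernetes.io/name <nil>

	// Double quotes still work, and escape sequences are decoded.
	s, err = jsonpath.UnquoteExtend(`"line\n"`)
	fmt.Printf("%q %v\n", s, err) // "line\n" <nil>
}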

View File

@@ -17,14 +17,14 @@ limitations under the License.
package qos
import (
corev1 "k8s.io/api/core/v1"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
@@ -32,12 +32,15 @@ func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
requests := corev1.ResourceList{}
limits := corev1.ResourceList{}
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
for _, container := range pod.Spec.Containers {
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
@@ -71,12 +74,12 @@ func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
}
}
if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return corev1.PodQOSBestEffort
return core.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
@@ -89,7 +92,7 @@ func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
}
if isGuaranteed &&
len(requests) == len(limits) {
return corev1.PodQOSGuaranteed
return core.PodQOSGuaranteed
}
return corev1.PodQOSBurstable
return core.PodQOSBurstable
}
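
The hunk above widens GetPodQOS to include init containers, so a pod is only Guaranteed when every container, init containers included, has matching CPU and memory requests and limits. A hedged sketch of the classification rules; the qos import path is an assumption, so point it at wherever this package is vendored in your tree:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubectl/pkg/util/qos" // assumed path for the qos package shown above
)

func main() {
	cpu := resource.MustParse("100m")
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name: "app",
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{corev1.ResourceCPU: cpu},
				},
			}},
		},
	}

	// Requests without matching limits -> Burstable.
	fmt.Println(qos.GetPodQOS(pod)) // Burstable

	// No requests or limits on any container -> BestEffort.
	pod.Spec.Containers[0].Resources = corev1.ResourceRequirements{}
	fmt.Println(qos.GetPodQOS(pod)) // BestEffort
}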

View File

@@ -103,6 +103,62 @@ func Int32Equal(a, b *int32) bool {
return *a == *b
}
// Uint returns a pointer to an uint
func Uint(i uint) *uint {
return &i
}
// UintPtr is a function variable referring to Uint.
// Deprecated: Use Uint instead.
var UintPtr = Uint // for back-compat
// UintDeref dereferences the uint ptr and returns it if not nil, or else
// returns def.
func UintDeref(ptr *uint, def uint) uint {
if ptr != nil {
return *ptr
}
return def
}
// UintPtrDerefOr is a function variable referring to UintDeref.
// Deprecated: Use UintDeref instead.
var UintPtrDerefOr = UintDeref // for back-compat
// Uint32 returns a pointer to an uint32.
func Uint32(i uint32) *uint32 {
return &i
}
// Uint32Ptr is a function variable referring to Uint32.
// Deprecated: Use Uint32 instead.
var Uint32Ptr = Uint32 // for back-compat
// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else
// returns def.
func Uint32Deref(ptr *uint32, def uint32) uint32 {
if ptr != nil {
return *ptr
}
return def
}
// Uint32PtrDerefOr is a function variable referring to Uint32Deref.
// Deprecated: Use Uint32Deref instead.
var Uint32PtrDerefOr = Uint32Deref // for back-compat
// Uint32Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Uint32Equal(a, b *uint32) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
// Int64 returns a pointer to an int64.
func Int64(i int64) *int64 {
return &i
@@ -137,6 +193,40 @@ func Int64Equal(a, b *int64) bool {
return *a == *b
}
// Uint64 returns a pointer to an uint64.
func Uint64(i uint64) *uint64 {
return &i
}
// Uint64Ptr is a function variable referring to Uint64.
// Deprecated: Use Uint64 instead.
var Uint64Ptr = Uint64 // for back-compat
// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else
// returns def.
func Uint64Deref(ptr *uint64, def uint64) uint64 {
if ptr != nil {
return *ptr
}
return def
}
// Uint64PtrDerefOr is a function variable referring to Uint64Deref.
// Deprecated: Use Uint64Deref instead.
var Uint64PtrDerefOr = Uint64Deref // for back-compat
// Uint64Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Uint64Equal(a, b *uint64) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
// Bool returns a pointer to a bool.
func Bool(b bool) *bool {
return &b