Add vendor dependencies as part of the git repo

Sam Stoelinga
2022-09-29 16:34:47 -07:00
parent a3b77e3e71
commit b624df4c39
3233 changed files with 1071657 additions and 0 deletions

vendor/k8s.io/client-go/util/cert/OWNERS generated vendored Normal file

@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

vendor/k8s.io/client-go/util/cert/cert.go generated vendored Normal file

@@ -0,0 +1,208 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"path/filepath"
"strings"
"time"
"k8s.io/client-go/util/keyutil"
netutils "k8s.io/utils/net"
)
const duration365d = time.Hour * 24 * 365
// Config contains the basic fields required for creating a certificate
type Config struct {
CommonName string
Organization []string
AltNames AltNames
Usages []x509.ExtKeyUsage
}
// AltNames contains the domain names and IP addresses that will be added
// to the API Server's x509 certificate Subject Alternative Names (SAN) field. The values
// will be passed directly to the x509.Certificate object.
type AltNames struct {
DNSNames []string
IPs []net.IP
}
// NewSelfSignedCACert creates a CA certificate
func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
now := time.Now()
tmpl := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(0),
Subject: pkix.Name{
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
DNSNames: []string{cfg.CommonName},
NotBefore: now.UTC(),
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
return nil, err
}
return x509.ParseCertificate(certDERBytes)
}
// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name.
// You may also specify additional subject alt names (either IP or DNS names) for the certificate.
func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
}
// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
// for the certificate.
//
// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
// Certs/keys not existing in that directory are created.
func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
maxAge := time.Hour * 24 * 365 // one year self-signed certs
baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
if len(fixtureDirectory) > 0 {
cert, err := ioutil.ReadFile(certFixturePath)
if err == nil {
key, err := ioutil.ReadFile(keyFixturePath)
if err == nil {
return cert, key, nil
}
return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
}
maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
}
caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, err
}
caTemplate := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
},
NotBefore: validFrom,
NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
caCertificate, err := x509.ParseCertificate(caDERBytes)
if err != nil {
return nil, nil, err
}
priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, err
}
template := x509.Certificate{
SerialNumber: big.NewInt(2),
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
},
NotBefore: validFrom,
NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
if ip := netutils.ParseIPSloppy(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, host)
}
template.IPAddresses = append(template.IPAddresses, alternateIPs...)
template.DNSNames = append(template.DNSNames, alternateDNS...)
derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
// Generate cert, followed by ca
certBuffer := bytes.Buffer{}
if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
return nil, nil, err
}
if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
return nil, nil, err
}
// Generate key
keyBuffer := bytes.Buffer{}
if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, err
}
if len(fixtureDirectory) > 0 {
if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
}
if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
}
}
return certBuffer.Bytes(), keyBuffer.Bytes(), nil
}
func ipsToStrings(ips []net.IP) []string {
ss := make([]string, 0, len(ips))
for _, ip := range ips {
ss = append(ss, ip.String())
}
return ss
}
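
As a usage sketch (not part of this commit), the snippet below mints a self-signed serving certificate with GenerateSelfSignedCertKey; "localhost" and 127.0.0.1 are placeholder values:

package main

import (
    "fmt"
    "net"

    "k8s.io/client-go/util/cert"
)

func main() {
    // Self-signed cert for "localhost", also valid for 127.0.0.1.
    certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey("localhost", []net.IP{net.ParseIP("127.0.0.1")}, nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("cert: %d bytes, key: %d bytes\n", len(certPEM), len(keyPEM))
}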

vendor/k8s.io/client-go/util/cert/csr.go generated vendored Normal file

@@ -0,0 +1,75 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"net"
)
// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
template := &x509.CertificateRequest{
Subject: *subject,
DNSNames: dnsSANs,
IPAddresses: ipSANs,
}
return MakeCSRFromTemplate(privateKey, template)
}
// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
// key and certificate request as a template. All key types that are
// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
// and *ecdsa.PrivateKey.)
func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
t := *template
t.SignatureAlgorithm = sigType(privateKey)
csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
if err != nil {
return nil, err
}
csrPemBlock := &pem.Block{
Type: CertificateRequestBlockType,
Bytes: csrDER,
}
return pem.EncodeToMemory(csrPemBlock), nil
}
func sigType(privateKey interface{}) x509.SignatureAlgorithm {
// Customize the signature for RSA keys, depending on the key size
if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
keySize := privateKey.N.BitLen()
switch {
case keySize >= 4096:
return x509.SHA512WithRSA
case keySize >= 3072:
return x509.SHA384WithRSA
default:
return x509.SHA256WithRSA
}
}
return x509.UnknownSignatureAlgorithm
}
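
A hedged sketch of MakeCSR: generate an RSA key and request a certificate for a placeholder name ("example.com" is illustrative only):

package main

import (
    cryptorand "crypto/rand"
    "crypto/rsa"
    "crypto/x509/pkix"
    "fmt"

    "k8s.io/client-go/util/cert"
)

func main() {
    key, err := rsa.GenerateKey(cryptorand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    csrPEM, err := cert.MakeCSR(key, &pkix.Name{CommonName: "example.com"}, []string{"example.com"}, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(csrPEM)) // PEM-encoded CERTIFICATE REQUEST block
}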

vendor/k8s.io/client-go/util/cert/io.go generated vendored Normal file

@@ -0,0 +1,113 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// CanReadCertAndKey returns true if both the certificate and key files exist and are
// readable, and false if neither exists. If only one of the pair can be read, it returns an error.
func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
certReadable := canReadFile(certPath)
keyReadable := canReadFile(keyPath)
if !certReadable && !keyReadable {
return false, nil
}
if !certReadable {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
}
if !keyReadable {
return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
}
return true, nil
}
// canReadFile returns true if the file represented by path exists and
// is readable; otherwise it returns false.
func canReadFile(path string) bool {
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
return true
}
// WriteCert writes the pem-encoded certificate data to certPath.
// The certificate file will be created with file mode 0644.
// If the certificate file already exists, it will be overwritten.
// The parent directory of the certPath will be created as needed with file mode 0755.
func WriteCert(certPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
return err
}
return ioutil.WriteFile(certPath, data, os.FileMode(0644))
}
// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPool(filename string) (*x509.CertPool, error) {
pemBlock, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
pool, err := NewPoolFromBytes(pemBlock)
if err != nil {
return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
}
return pool, nil
}
// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
// Returns an error if a certificate could not be parsed, or if the bytes do not contain any certificates
func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
certs, err := ParseCertsPEM(pemBlock)
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool, nil
}
// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func CertsFromFile(file string) ([]*x509.Certificate, error) {
pemBlock, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
certs, err := ParseCertsPEM(pemBlock)
if err != nil {
return nil, fmt.Errorf("error reading %s: %s", file, err)
}
return certs, nil
}
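
One way these helpers compose (a sketch; the temp directory and file name are arbitrary): write a generated cert with WriteCert, then load it into a pool with NewPool:

package main

import (
    "fmt"
    "os"
    "path/filepath"

    "k8s.io/client-go/util/cert"
)

func main() {
    dir, err := os.MkdirTemp("", "certs") // scratch directory for the example
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)
    certPEM, _, err := cert.GenerateSelfSignedCertKey("localhost", nil, nil)
    if err != nil {
        panic(err)
    }
    path := filepath.Join(dir, "ca.crt")
    if err := cert.WriteCert(path, certPEM); err != nil {
        panic(err)
    }
    pool, err := cert.NewPool(path)
    if err != nil {
        panic(err)
    }
    fmt.Println("pool loaded:", pool != nil)
}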

vendor/k8s.io/client-go/util/cert/pem.go generated vendored Normal file

@@ -0,0 +1,73 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"bytes"
"crypto/x509"
"encoding/pem"
"errors"
)
const (
// CertificateBlockType is a possible value for pem.Block.Type.
CertificateBlockType = "CERTIFICATE"
// CertificateRequestBlockType is a possible value for pem.Block.Type.
CertificateRequestBlockType = "CERTIFICATE REQUEST"
)
// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
ok := false
certs := []*x509.Certificate{}
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
// Only use PEM "CERTIFICATE" blocks without extra headers
if block.Type != CertificateBlockType || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return certs, err
}
certs = append(certs, cert)
ok = true
}
if !ok {
return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
}
return certs, nil
}
// EncodeCertificates returns the PEM-encoded byte array that represents the specified certs.
func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
b := bytes.Buffer{}
for _, cert := range certs {
if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
return []byte{}, err
}
}
return b.Bytes(), nil
}
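
A quick round-trip sketch: parse the PEM bundle produced by GenerateSelfSignedCertKey (leaf cert followed by its CA) and re-encode it:

package main

import (
    "fmt"

    "k8s.io/client-go/util/cert"
)

func main() {
    certPEM, _, err := cert.GenerateSelfSignedCertKey("localhost", nil, nil)
    if err != nil {
        panic(err)
    }
    certs, err := cert.ParseCertsPEM(certPEM) // leaf cert followed by its CA
    if err != nil {
        panic(err)
    }
    roundTripped, err := cert.EncodeCertificates(certs...)
    if err != nil {
        panic(err)
    }
    fmt.Println("parsed", len(certs), "certs,", len(roundTripped), "bytes re-encoded")
}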

vendor/k8s.io/client-go/util/cert/server_inspection.go generated vendored Normal file

@@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net/url"
"strings"
)
// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
// state of particular servers. apiHost is "host:port"
func GetClientCANames(apiHost string) ([]string, error) {
// when we run this the second time, we know which one we are expecting
acceptableCAs := []string{}
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure so that the handshake always reaches GetClientCertificate
GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
acceptableCAs = []string{}
for _, curr := range hello.AcceptableCAs {
acceptableCAs = append(acceptableCAs, string(curr))
}
return &tls.Certificate{}, nil
},
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return acceptableCAs, nil
}
// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, err
}
return GetClientCANames(apiserverURL.Host)
}
// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure so that we always get connected
}
// if a name is specified for SNI, set it.
if len(serverName) > 0 {
tlsConfig.ServerName = serverName
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, nil, err
}
if err = conn.Close(); err != nil {
return nil, nil, fmt.Errorf("failed to close connection : %v", err)
}
peerCerts := conn.ConnectionState().PeerCertificates
peerCertBytes := [][]byte{}
for _, a := range peerCerts {
actualCert, err := EncodeCertificates(a)
if err != nil {
return nil, nil, err
}
peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
}
return peerCerts, peerCertBytes, err
}
// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, nil, err
}
return GetServingCertificates(apiserverURL.Host, serverName)
}
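
For illustration, a sketch that dumps the serving chain of a live endpoint (it dials the network; "https://example.com:443" is a placeholder URL):

package main

import (
    "fmt"

    "k8s.io/client-go/util/cert"
)

func main() {
    certs, _, err := cert.GetServingCertificatesForURL("https://example.com:443", "")
    if err != nil {
        panic(err)
    }
    for _, c := range certs {
        fmt.Println(c.Subject.CommonName)
    }
}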

vendor/k8s.io/client-go/util/connrotation/connrotation.go generated vendored Normal file

@@ -0,0 +1,133 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package connrotation implements a connection dialer that tracks and can close
// all created connections.
//
// This is used for credential rotation of long-lived connections, when there's
// no way to re-authenticate on a live connection.
package connrotation
import (
"context"
"net"
"sync"
)
// DialFunc is a shorthand for the signature of net.DialContext.
type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
// Dialer opens connections through Dial and tracks them.
type Dialer struct {
dial DialFunc
*ConnectionTracker
}
// NewDialer creates a new Dialer instance.
// Equivalent to NewDialerWithTracker(dial, nil).
func NewDialer(dial DialFunc) *Dialer {
return NewDialerWithTracker(dial, nil)
}
// NewDialerWithTracker creates a new Dialer instance.
//
// If dial is not nil, it will be used to create new underlying connections.
// Otherwise net.DialContext is used.
// If tracker is not nil, it will be used to track new underlying connections.
// Otherwise NewConnectionTracker() is used.
func NewDialerWithTracker(dial DialFunc, tracker *ConnectionTracker) *Dialer {
if tracker == nil {
tracker = NewConnectionTracker()
}
return &Dialer{
dial: dial,
ConnectionTracker: tracker,
}
}
// ConnectionTracker keeps track of opened connections
type ConnectionTracker struct {
mu sync.Mutex
conns map[*closableConn]struct{}
}
// NewConnectionTracker returns a connection tracker for use with NewDialerWithTracker
func NewConnectionTracker() *ConnectionTracker {
return &ConnectionTracker{
conns: make(map[*closableConn]struct{}),
}
}
// CloseAll forcibly closes all tracked connections.
//
// Note: new connections may get created before CloseAll returns.
func (c *ConnectionTracker) CloseAll() {
c.mu.Lock()
conns := c.conns
c.conns = make(map[*closableConn]struct{})
c.mu.Unlock()
for conn := range conns {
conn.Close()
}
}
// Track adds the connection to the list of tracked connections,
// and returns a wrapped copy of the connection that stops tracking the connection
// when it is closed.
func (c *ConnectionTracker) Track(conn net.Conn) net.Conn {
closable := &closableConn{Conn: conn}
// When the connection is closed, remove it from the map. This will
// be no-op if the connection isn't in the map, e.g. if CloseAll()
// is called.
closable.onClose = func() {
c.mu.Lock()
delete(c.conns, closable)
c.mu.Unlock()
}
// Start tracking the connection
c.mu.Lock()
c.conns[closable] = struct{}{}
c.mu.Unlock()
return closable
}
// Dial creates a new tracked connection.
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
return d.DialContext(context.Background(), network, address)
}
// DialContext creates a new tracked connection.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
conn, err := d.dial(ctx, network, address)
if err != nil {
return nil, err
}
return d.ConnectionTracker.Track(conn), nil
}
type closableConn struct {
onClose func()
net.Conn
}
func (c *closableConn) Close() error {
go c.onClose()
return c.Conn.Close()
}
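
A minimal sketch of the rotation pattern this package enables: route an http.Transport through a tracked dialer, then call CloseAll after swapping credentials so every long-lived connection is re-established:

package main

import (
    "net"
    "net/http"
    "time"

    "k8s.io/client-go/util/connrotation"
)

func main() {
    // Wrap the standard dialer so connections can be force-closed later.
    dialer := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second}).DialContext)
    client := &http.Client{Transport: &http.Transport{DialContext: dialer.DialContext}}
    _ = client // use the client as usual...
    // ...and after rotating credentials, drop all live connections.
    dialer.CloseAll()
}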

vendor/k8s.io/client-go/util/flowcontrol/backoff.go generated vendored Normal file

@@ -0,0 +1,181 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"math/rand"
"sync"
"time"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/integer"
)
type backoffEntry struct {
backoff time.Duration
lastUpdate time.Time
}
type Backoff struct {
sync.RWMutex
Clock clock.Clock
defaultDuration time.Duration
maxDuration time.Duration
perItemBackoff map[string]*backoffEntry
rand *rand.Rand
// maxJitterFactor adds jitter to the exponentially backed off delay.
// if maxJitterFactor is zero, no jitter is added to the delay in
// order to maintain current behavior.
maxJitterFactor float64
}
func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
return newBackoff(tc, initial, max, 0.0)
}
func NewBackOff(initial, max time.Duration) *Backoff {
return NewBackOffWithJitter(initial, max, 0.0)
}
func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
return newBackoff(tc, initial, max, maxJitterFactor)
}
func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff {
clock := clock.RealClock{}
return newBackoff(clock, initial, max, maxJitterFactor)
}
func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff {
var random *rand.Rand
if maxJitterFactor > 0 {
random = rand.New(rand.NewSource(clock.Now().UnixNano()))
}
return &Backoff{
perItemBackoff: map[string]*backoffEntry{},
Clock: clock,
defaultDuration: initial,
maxDuration: max,
maxJitterFactor: maxJitterFactor,
rand: random,
}
}
// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
p.RLock()
defer p.RUnlock()
var delay time.Duration
entry, ok := p.perItemBackoff[id]
if ok {
delay = entry.backoff
}
return delay
}
// move backoff to the next mark, capping at maxDuration
func (p *Backoff) Next(id string, eventTime time.Time) {
p.Lock()
defer p.Unlock()
entry, ok := p.perItemBackoff[id]
if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
entry = p.initEntryUnsafe(id)
entry.backoff += p.jitter(entry.backoff)
} else {
delay := entry.backoff * 2 // exponential
delay += p.jitter(entry.backoff) // add some jitter to the delay
entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
}
entry.lastUpdate = p.Clock.Now()
}
// Reset forces clearing of all backoff data for a given key.
func (p *Backoff) Reset(id string) {
p.Lock()
defer p.Unlock()
delete(p.perItemBackoff, id)
}
// IsInBackOffSince returns true if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
}
if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return p.Clock.Since(eventTime) < entry.backoff
}
// IsInBackOffSinceUpdate returns true if the time since lastUpdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
}
if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return eventTime.Sub(entry.lastUpdate) < entry.backoff
}
// Garbage collect records that have aged past maxDuration. Backoff users are expected
// to invoke this periodically.
func (p *Backoff) GC() {
p.Lock()
defer p.Unlock()
now := p.Clock.Now()
for id, entry := range p.perItemBackoff {
if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
// GC when entry has not been updated for 2*maxDuration
delete(p.perItemBackoff, id)
}
}
}
func (p *Backoff) DeleteEntry(id string) {
p.Lock()
defer p.Unlock()
delete(p.perItemBackoff, id)
}
// Take a lock on *Backoff, before calling initEntryUnsafe
func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
entry := &backoffEntry{backoff: p.defaultDuration}
p.perItemBackoff[id] = entry
return entry
}
func (p *Backoff) jitter(delay time.Duration) time.Duration {
if p.rand == nil {
return 0
}
return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
}
// After 2*maxDuration we restart the backoff factor to the beginning
func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
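
A sketch of per-item exponential backoff (the item key is arbitrary): each Next call after the first doubles the delay, capped at the configured maximum:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    b := flowcontrol.NewBackOff(time.Second, 32*time.Second) // start at 1s, cap at 32s
    id := "item-1"
    for i := 0; i < 3; i++ {
        b.Next(id, b.Clock.Now()) // record a failure
    }
    fmt.Println("current delay:", b.Get(id)) // 4s (1s -> 2s -> 4s)
}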

vendor/k8s.io/client-go/util/flowcontrol/throttle.go generated vendored Normal file

@@ -0,0 +1,192 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"context"
"errors"
"sync"
"time"
"golang.org/x/time/rate"
"k8s.io/utils/clock"
)
type PassiveRateLimiter interface {
// TryAccept returns true if a token is taken immediately. Otherwise,
// it returns false.
TryAccept() bool
// Stop stops the rate limiter; subsequent calls to TryAccept will return false
Stop()
// QPS returns QPS of this rate limiter
QPS() float32
}
type RateLimiter interface {
PassiveRateLimiter
// Accept returns once a token becomes available.
Accept()
// Wait returns nil if a token is taken before the Context is done.
Wait(ctx context.Context) error
}
type tokenBucketPassiveRateLimiter struct {
limiter *rate.Limiter
qps float32
clock clock.PassiveClock
}
type tokenBucketRateLimiter struct {
tokenBucketPassiveRateLimiter
clock Clock
}
// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
}
// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
// a PassiveRateLimiter which does not have Accept() and Wait() methods.
func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
}
// An injectable, mockable clock interface.
type Clock interface {
clock.PassiveClock
Sleep(time.Duration)
}
var _ Clock = (*clock.RealClock)(nil)
// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
// but allows an injectable clock, for testing.
func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithClock(limiter, c, qps)
}
// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
// and uses a PassiveClock.
func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
limiter := rate.NewLimiter(rate.Limit(qps), burst)
return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
}
func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
return &tokenBucketRateLimiter{
tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
clock: c,
}
}
func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
return &tokenBucketPassiveRateLimiter{
limiter: limiter,
qps: qps,
clock: c,
}
}
func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
}
func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
return tbprl.qps
}
func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
}
// Accept will block until a token becomes available
func (tbrl *tokenBucketRateLimiter) Accept() {
now := tbrl.clock.Now()
tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
}
func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
return tbrl.limiter.Wait(ctx)
}
type fakeAlwaysRateLimiter struct{}
func NewFakeAlwaysRateLimiter() RateLimiter {
return &fakeAlwaysRateLimiter{}
}
func (t *fakeAlwaysRateLimiter) TryAccept() bool {
return true
}
func (t *fakeAlwaysRateLimiter) Stop() {}
func (t *fakeAlwaysRateLimiter) Accept() {}
func (t *fakeAlwaysRateLimiter) QPS() float32 {
return 1
}
func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
return nil
}
type fakeNeverRateLimiter struct {
wg sync.WaitGroup
}
func NewFakeNeverRateLimiter() RateLimiter {
rl := fakeNeverRateLimiter{}
rl.wg.Add(1)
return &rl
}
func (t *fakeNeverRateLimiter) TryAccept() bool {
return false
}
func (t *fakeNeverRateLimiter) Stop() {
t.wg.Done()
}
func (t *fakeNeverRateLimiter) Accept() {
t.wg.Wait()
}
func (t *fakeNeverRateLimiter) QPS() float32 {
return 1
}
func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
return errors.New("can not be accepted")
}
var (
_ RateLimiter = (*tokenBucketRateLimiter)(nil)
_ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
_ RateLimiter = (*fakeNeverRateLimiter)(nil)
)
var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
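
A token-bucket sketch: 5 QPS with bursts of 10, using TryAccept for non-blocking checks and Accept to wait for a token:

package main

import (
    "fmt"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    rl := flowcontrol.NewTokenBucketRateLimiter(5.0, 10)
    for i := 0; i < 12; i++ {
        if rl.TryAccept() {
            fmt.Println("request", i, "allowed immediately")
            continue
        }
        rl.Accept() // block until a token is available
        fmt.Println("request", i, "allowed after waiting")
    }
}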

vendor/k8s.io/client-go/util/homedir/homedir.go generated vendored Normal file

@@ -0,0 +1,92 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package homedir
import (
"os"
"path/filepath"
"runtime"
)
// HomeDir returns the home directory for the current user.
// On Windows:
// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
func HomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOME")
homeDriveHomePath := ""
if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
homeDriveHomePath = homeDrive + homePath
}
userProfile := os.Getenv("USERPROFILE")
// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
for _, p := range []string{home, homeDriveHomePath, userProfile} {
if len(p) == 0 {
continue
}
if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
continue
}
return p
}
firstSetPath := ""
firstExistingPath := ""
// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
for _, p := range []string{home, userProfile, homeDriveHomePath} {
if len(p) == 0 {
continue
}
if len(firstSetPath) == 0 {
// remember the first path that is set
firstSetPath = p
}
info, err := os.Stat(p)
if err != nil {
continue
}
if len(firstExistingPath) == 0 {
// remember the first path that exists
firstExistingPath = p
}
if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
// return first path that is writeable
return p
}
}
// If none are writeable, return first location that exists
if len(firstExistingPath) > 0 {
return firstExistingPath
}
// If none exist, return first location that is set
if len(firstSetPath) > 0 {
return firstSetPath
}
// We've got nothing
return ""
}
return os.Getenv("HOME")
}
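
The typical use (a sketch): derive the conventional kubeconfig path from the home directory:

package main

import (
    "fmt"
    "path/filepath"

    "k8s.io/client-go/util/homedir"
)

func main() {
    kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
    fmt.Println(kubeconfig)
}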

vendor/k8s.io/client-go/util/jsonpath/doc.go generated vendored Normal file

@@ -0,0 +1,20 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package jsonpath is a template engine using JSONPath syntax,
// which can be seen at http://goessner.net/articles/JsonPath/.
// In addition, it has a {range} {end} function to iterate over lists and slices.
package jsonpath // import "k8s.io/client-go/util/jsonpath"

vendor/k8s.io/client-go/util/jsonpath/jsonpath.go generated vendored Normal file

@@ -0,0 +1,579 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strings"
"k8s.io/client-go/third_party/forked/golang/template"
)
type JSONPath struct {
name string
parser *Parser
beginRange int
inRange int
endRange int
lastEndNode *Node
allowMissingKeys bool
outputJSON bool
}
// New creates a new JSONPath with the given name.
func New(name string) *JSONPath {
return &JSONPath{
name: name,
beginRange: 0,
inRange: 0,
endRange: 0,
}
}
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
// cannot be located, or simply an empty result. The receiver is returned for chaining.
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
j.allowMissingKeys = allow
return j
}
// Parse parses the given template; it returns an error if the template is invalid.
func (j *JSONPath) Parse(text string) error {
var err error
j.parser, err = Parse(j.name, text)
return err
}
// Execute binds data into the template and writes the result.
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
fullResults, err := j.FindResults(data)
if err != nil {
return err
}
for ix := range fullResults {
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
return err
}
}
return nil
}
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
if j.parser == nil {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
cur := []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(cur, node)
if err != nil {
return nil, err
}
// encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange--
j.lastEndNode = &nodes[i]
break
}
// encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange--
j.inRange++
if len(results) > 0 {
for _, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
} else {
// If the range has no results, we still need to process the nodes within the range
// so the position will advance to the end node
j.parser.Root.Nodes = nodes[i+1:]
_, err := j.FindResults(nil)
if err != nil {
return nil, err
}
}
j.inRange--
// Fast forward to resume processing after the most recent end node that was encountered
for k := i + 1; k < len(nodes); k++ {
if &nodes[k] == j.lastEndNode {
i = k
break
}
}
continue
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
func (j *JSONPath) EnableJSONOutput(v bool) {
j.outputJSON = v
}
// PrintResults writes the results into writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
if j.outputJSON {
// convert the []reflect.Value to something that json
// will be able to marshal
r := make([]interface{}, 0, len(results))
for i := range results {
r = append(r, results[i].Interface())
}
results = []reflect.Value{reflect.ValueOf(r)}
}
for i, r := range results {
var text []byte
var err error
outputJSON := true
kind := r.Kind()
if kind == reflect.Interface {
kind = r.Elem().Kind()
}
switch kind {
case reflect.Map:
case reflect.Array:
case reflect.Slice:
case reflect.Struct:
default:
outputJSON = false
}
switch {
case outputJSON || j.outputJSON:
if j.outputJSON {
text, err = json.MarshalIndent(r.Interface(), "", " ")
text = append(text, '\n')
} else {
text, err = json.Marshal(r.Interface())
}
default:
text, err = j.evalToText(r)
}
if err != nil {
return err
}
if i != len(results)-1 {
text = append(text, ' ')
}
if _, err = wr.Write(text); err != nil {
return err
}
}
return nil
}
// walk visits tree rooted at the given node in DFS order
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
switch node := node.(type) {
case *ListNode:
return j.evalList(value, node)
case *TextNode:
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
case *FieldNode:
return j.evalField(value, node)
case *ArrayNode:
return j.evalArray(value, node)
case *FilterNode:
return j.evalFilter(value, node)
case *IntNode:
return j.evalInt(value, node)
case *BoolNode:
return j.evalBool(value, node)
case *FloatNode:
return j.evalFloat(value, node)
case *WildcardNode:
return j.evalWildcard(value, node)
case *RecursiveNode:
return j.evalRecursive(value, node)
case *UnionNode:
return j.evalUnion(value, node)
case *IdentifierNode:
return j.evalIdentifier(value, node)
default:
return value, fmt.Errorf("unexpected Node %v", node)
}
}
// evalInt evaluates IntNode
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalFloat evaluates FloatNode
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalBool evaluates BoolNode
func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalList evaluates ListNode
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
var err error
curValue := value
for _, node := range node.Nodes {
curValue, err = j.walk(curValue, node)
if err != nil {
return curValue, err
}
}
return curValue, nil
}
// evalIdentifier evaluates IdentifierNode
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
results := []reflect.Value{}
switch node.Name {
case "range":
j.beginRange++
results = input
case "end":
if j.inRange > 0 {
j.endRange++
} else {
return results, fmt.Errorf("not in range, nothing to end")
}
default:
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
}
return results, nil
}
// evalArray evaluates ArrayNode
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value.Type())
}
params := node.Params
if !params[0].Known {
params[0].Value = 0
}
if params[0].Value < 0 {
params[0].Value += value.Len()
}
if !params[1].Known {
params[1].Value = value.Len()
}
if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) {
params[1].Value += value.Len()
}
sliceLength := value.Len()
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
if params[0].Value >= sliceLength || params[0].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
}
if params[1].Value > sliceLength || params[1].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
}
if params[0].Value > params[1].Value {
return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value)
}
} else {
return result, nil
}
value = value.Slice(params[0].Value, params[1].Value)
step := 1
if params[2].Known {
if params[2].Value <= 0 {
return input, fmt.Errorf("step must be > 0")
}
step = params[2].Value
}
for i := 0; i < value.Len(); i += step {
result = append(result, value.Index(i))
}
}
return result, nil
}
// evalUnion evaluates UnionNode
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, listNode := range node.Nodes {
temp, err := j.evalList(input, listNode)
if err != nil {
return input, err
}
result = append(result, temp...)
}
return result, nil
}
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
t := value.Type()
var inlineValue *reflect.Value
for ix := 0; ix < t.NumField(); ix++ {
f := t.Field(ix)
jsonTag := f.Tag.Get("json")
parts := strings.Split(jsonTag, ",")
if len(parts) == 0 {
continue
}
if parts[0] == node.Value {
return value.Field(ix), nil
}
if len(parts[0]) == 0 {
val := value.Field(ix)
inlineValue = &val
}
}
if inlineValue != nil {
if inlineValue.Kind() == reflect.Struct {
// handle 'inline'
match, err := j.findFieldInValue(inlineValue, node)
if err != nil {
return reflect.Value{}, err
}
if match.IsValid() {
return match, nil
}
}
}
return value.FieldByName(node.Value), nil
}
// evalField evaluates field of struct or key of map.
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
results := []reflect.Value{}
// If there's no input, there's no output
if len(input) == 0 {
return results, nil
}
for _, value := range input {
var result reflect.Value
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() == reflect.Struct {
var err error
if result, err = j.findFieldInValue(&value, node); err != nil {
return nil, err
}
} else if value.Kind() == reflect.Map {
mapKeyType := value.Type().Key()
nodeValue := reflect.ValueOf(node.Value)
// node value type must be convertible to map key type
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
}
result = value.MapIndex(nodeValue.Convert(mapKeyType))
}
if result.IsValid() {
results = append(results, result)
}
}
if len(results) == 0 {
if j.allowMissingKeys {
return results, nil
}
return results, fmt.Errorf("%s is not found", node.Value)
}
return results, nil
}
// evalWildcard extracts all contents of the given value
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalRecursive visits the given value recursively and pushes all of them to result
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
results := []reflect.Value{}
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
if len(results) != 0 {
result = append(result, value)
output, err := j.evalRecursive(results, node)
if err != nil {
return result, err
}
result = append(result, output...)
}
}
return result, nil
}
// evalFilter filters array according to FilterNode
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, _ = template.Indirect(value)
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
}
for i := 0; i < value.Len(); i++ {
temp := []reflect.Value{value.Index(i)}
lefts, err := j.evalList(temp, node.Left)
// the "exists" operator only checks whether the left side produced any results
if node.Operator == "exists" {
if len(lefts) > 0 {
results = append(results, value.Index(i))
}
continue
}
if err != nil {
return input, err
}
var left, right interface{}
switch {
case len(lefts) == 0:
continue
case len(lefts) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
left = lefts[0].Interface()
rights, err := j.evalList(temp, node.Right)
if err != nil {
return input, err
}
switch {
case len(rights) == 0:
continue
case len(rights) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
right = rights[0].Interface()
pass := false
switch node.Operator {
case "<":
pass, err = template.Less(left, right)
case ">":
pass, err = template.Greater(left, right)
case "==":
pass, err = template.Equal(left, right)
case "!=":
pass, err = template.NotEqual(left, right)
case "<=":
pass, err = template.LessEqual(left, right)
case ">=":
pass, err = template.GreaterEqual(left, right)
default:
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
}
if err != nil {
return results, err
}
if pass {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalToText translates reflect value to corresponding text
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
iface, ok := template.PrintableValue(v)
if !ok {
return nil, fmt.Errorf("can't print type %s", v.Type())
}
var buffer bytes.Buffer
fmt.Fprint(&buffer, iface)
return buffer.Bytes(), nil
}
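
A small end-to-end sketch of the package (the template and object are illustrative): parse a template, then execute it against a map:

package main

import (
    "bytes"
    "fmt"

    "k8s.io/client-go/util/jsonpath"
)

func main() {
    jp := jsonpath.New("example")
    if err := jp.Parse("{.kind} {.metadata.name}"); err != nil {
        panic(err)
    }
    obj := map[string]interface{}{
        "kind":     "Pod",
        "metadata": map[string]interface{}{"name": "nginx"},
    }
    var buf bytes.Buffer
    if err := jp.Execute(&buf, obj); err != nil {
        panic(err)
    }
    fmt.Println(buf.String()) // Pod nginx
}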

vendor/k8s.io/client-go/util/jsonpath/node.go generated vendored Normal file

@@ -0,0 +1,256 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import "fmt"
// NodeType identifies the type of a parse tree node.
type NodeType int
// Type returns itself and provides an easy default implementation
func (t NodeType) Type() NodeType {
return t
}
func (t NodeType) String() string {
return NodeTypeName[t]
}
const (
NodeText NodeType = iota
NodeArray
NodeList
NodeField
NodeIdentifier
NodeFilter
NodeInt
NodeFloat
NodeWildcard
NodeRecursive
NodeUnion
NodeBool
)
var NodeTypeName = map[NodeType]string{
NodeText: "NodeText",
NodeArray: "NodeArray",
NodeList: "NodeList",
NodeField: "NodeField",
NodeIdentifier: "NodeIdentifier",
NodeFilter: "NodeFilter",
NodeInt: "NodeInt",
NodeFloat: "NodeFloat",
NodeWildcard: "NodeWildcard",
NodeRecursive: "NodeRecursive",
NodeUnion: "NodeUnion",
NodeBool: "NodeBool",
}
type Node interface {
Type() NodeType
String() string
}
// ListNode holds a sequence of nodes.
type ListNode struct {
NodeType
Nodes []Node // The element nodes in lexical order.
}
func newList() *ListNode {
return &ListNode{NodeType: NodeList}
}
func (l *ListNode) append(n Node) {
l.Nodes = append(l.Nodes, n)
}
func (l *ListNode) String() string {
return l.Type().String()
}
// TextNode holds plain text.
type TextNode struct {
NodeType
Text string // The text; may span newlines.
}
func newText(text string) *TextNode {
return &TextNode{NodeType: NodeText, Text: text}
}
func (t *TextNode) String() string {
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
}
// FieldNode holds field of struct
type FieldNode struct {
NodeType
Value string
}
func newField(value string) *FieldNode {
return &FieldNode{NodeType: NodeField, Value: value}
}
func (f *FieldNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
}
// IdentifierNode holds an identifier
type IdentifierNode struct {
NodeType
Name string
}
func newIdentifier(value string) *IdentifierNode {
return &IdentifierNode{
NodeType: NodeIdentifier,
Name: value,
}
}
func (f *IdentifierNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
}
// ParamsEntry holds param information for ArrayNode
type ParamsEntry struct {
Value int
Known bool // whether the value is known at parse time
Derived bool
}
// ArrayNode holds start, end, step information for array index selection
type ArrayNode struct {
NodeType
Params [3]ParamsEntry // start, end, step
}
func newArray(params [3]ParamsEntry) *ArrayNode {
return &ArrayNode{
NodeType: NodeArray,
Params: params,
}
}
func (a *ArrayNode) String() string {
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
}
// FilterNode holds operand and operator information for filter
type FilterNode struct {
NodeType
Left *ListNode
Right *ListNode
Operator string
}
func newFilter(left, right *ListNode, operator string) *FilterNode {
return &FilterNode{
NodeType: NodeFilter,
Left: left,
Right: right,
Operator: operator,
}
}
func (f *FilterNode) String() string {
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
}
// IntNode holds integer value
type IntNode struct {
NodeType
Value int
}
func newInt(num int) *IntNode {
return &IntNode{NodeType: NodeInt, Value: num}
}
func (i *IntNode) String() string {
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
}
// FloatNode holds float value
type FloatNode struct {
NodeType
Value float64
}
func newFloat(num float64) *FloatNode {
return &FloatNode{NodeType: NodeFloat, Value: num}
}
func (i *FloatNode) String() string {
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
}
// WildcardNode means a wildcard
type WildcardNode struct {
NodeType
}
func newWildcard() *WildcardNode {
return &WildcardNode{NodeType: NodeWildcard}
}
func (i *WildcardNode) String() string {
return i.Type().String()
}
// RecursiveNode means a recursive descent operator
type RecursiveNode struct {
NodeType
}
func newRecursive() *RecursiveNode {
return &RecursiveNode{NodeType: NodeRecursive}
}
func (r *RecursiveNode) String() string {
return r.Type().String()
}
// UnionNode is union of ListNode
type UnionNode struct {
NodeType
Nodes []*ListNode
}
func newUnion(nodes []*ListNode) *UnionNode {
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
}
func (u *UnionNode) String() string {
return u.Type().String()
}
// BoolNode holds bool value
type BoolNode struct {
NodeType
Value bool
}
func newBool(value bool) *BoolNode {
return &BoolNode{NodeType: NodeBool, Value: value}
}
func (b *BoolNode) String() string {
return fmt.Sprintf("%s: %t", b.Type(), b.Value)
}

vendor/k8s.io/client-go/util/jsonpath/parser.go generated vendored Normal file

@@ -0,0 +1,527 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
const eof = -1
const (
leftDelim = "{"
rightDelim = "}"
)
type Parser struct {
Name string
Root *ListNode
input string
pos int
start int
width int
}
var (
ErrSyntax = errors.New("invalid syntax")
dictKeyRex = regexp.MustCompile(`^'([^']*)'$`)
sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`)
)
// Parse parses the given text and returns a Parser.
// If an error is encountered, parsing stops and a nil
// Parser is returned with the error.
func Parse(name, text string) (*Parser, error) {
p := NewParser(name)
err := p.Parse(text)
if err != nil {
p = nil
}
return p, err
}
func NewParser(name string) *Parser {
return &Parser{
Name: name,
}
}
// parseAction parses the expression inside the delimiters
func parseAction(name, text string) (*Parser, error) {
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
// when an error happens, p will be nil, so we need to return here
if err != nil {
return p, err
}
p.Root = p.Root.Nodes[0].(*ListNode)
return p, nil
}
func (p *Parser) Parse(text string) error {
p.input = text
p.Root = newList()
p.pos = 0
return p.parseText(p.Root)
}
// consumeText returns the text parsed since the last call to consumeText
func (p *Parser) consumeText() string {
value := p.input[p.start:p.pos]
p.start = p.pos
return value
}
// next returns the next rune in the input.
func (p *Parser) next() rune {
if p.pos >= len(p.input) {
p.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
p.width = w
p.pos += p.width
return r
}
// peek returns but does not consume the next rune in the input.
func (p *Parser) peek() rune {
r := p.next()
p.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (p *Parser) backup() {
p.pos -= p.width
}
func (p *Parser) parseText(cur *ListNode) error {
for {
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return p.parseLeftDelim(cur)
}
if p.next() == eof {
break
}
}
// Correctly reached EOF.
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return nil
}
// parseLeftDelim scans the left delimiter, which is known to be present.
func (p *Parser) parseLeftDelim(cur *ListNode) error {
p.pos += len(leftDelim)
p.consumeText()
newNode := newList()
cur.append(newNode)
cur = newNode
return p.parseInsideAction(cur)
}
func (p *Parser) parseInsideAction(cur *ListNode) error {
prefixMap := map[string]func(*ListNode) error{
rightDelim: p.parseRightDelim,
"[?(": p.parseFilter,
"..": p.parseRecursive,
}
for prefix, parseFunc := range prefixMap {
if strings.HasPrefix(p.input[p.pos:], prefix) {
return parseFunc(cur)
}
}
switch r := p.next(); {
case r == eof || isEndOfLine(r):
return fmt.Errorf("unclosed action")
case r == ' ':
p.consumeText()
case r == '@' || r == '$': //the current object, just pass it
p.consumeText()
case r == '[':
return p.parseArray(cur)
case r == '"' || r == '\'':
return p.parseQuote(cur, r)
case r == '.':
return p.parseField(cur)
case r == '+' || r == '-' || unicode.IsDigit(r):
p.backup()
return p.parseNumber(cur)
case isAlphaNumeric(r):
p.backup()
return p.parseIdentifier(cur)
default:
return fmt.Errorf("unrecognized character in action: %#U", r)
}
return p.parseInsideAction(cur)
}
// parseRightDelim scans the right delimiter, which is known to be present.
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
return p.parseText(p.Root)
}
// parseIdentifier scans built-in keywords, like "range" and "end"
func (p *Parser) parseIdentifier(cur *ListNode) error {
var r rune
for {
r = p.next()
if isTerminator(r) {
p.backup()
break
}
}
value := p.consumeText()
if isBool(value) {
v, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
}
cur.append(newBool(v))
} else {
cur.append(newIdentifier(value))
}
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive descent operator ..
func (p *Parser) parseRecursive(cur *ListNode) error {
if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
return fmt.Errorf("invalid multiple recursive descent")
}
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
if r := p.peek(); isAlphaNumeric(r) {
return p.parseField(cur)
}
return p.parseInsideAction(cur)
}
// parseNumber scans a number
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
p.next()
}
for {
r = p.next()
if r != '.' && !unicode.IsDigit(r) {
p.backup()
break
}
}
value := p.consumeText()
i, err := strconv.Atoi(value)
if err == nil {
cur.append(newInt(i))
return p.parseInsideAction(cur)
}
d, err := strconv.ParseFloat(value, 64)
if err == nil {
cur.append(newFloat(d))
return p.parseInsideAction(cur)
}
return fmt.Errorf("cannot parse number %s", value)
}
// parseArray scans array index selection
func (p *Parser) parseArray(cur *ListNode) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated array")
case ']':
break Loop
}
}
text := p.consumeText()
text = text[1 : len(text)-1]
if text == "*" {
text = ":"
}
// union operator
strs := strings.Split(text, ",")
if len(strs) > 1 {
union := []*ListNode{}
for _, str := range strs {
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
if err != nil {
return err
}
union = append(union, parser.Root)
}
cur.append(newUnion(union))
return p.parseInsideAction(cur)
}
// dict key
value := dictKeyRex.FindStringSubmatch(text)
if value != nil {
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
if err != nil {
return err
}
for _, node := range parser.Root.Nodes {
cur.append(node)
}
return p.parseInsideAction(cur)
}
// slice operator
value = sliceOperatorRex.FindStringSubmatch(text)
if value == nil {
return fmt.Errorf("invalid array index %s", text)
}
value = value[1:]
params := [3]ParamsEntry{}
for i := 0; i < 3; i++ {
if value[i] != "" {
if i > 0 {
value[i] = value[i][1:]
}
if i > 0 && value[i] == "" {
params[i].Known = false
} else {
var err error
params[i].Known = true
params[i].Value, err = strconv.Atoi(value[i])
if err != nil {
return fmt.Errorf("array index %s is not a number", value[i])
}
}
} else {
if i == 1 {
params[i].Known = true
params[i].Value = params[0].Value + 1
params[i].Derived = true
} else {
params[i].Known = false
params[i].Value = 0
}
}
}
cur.append(newArray(params))
return p.parseInsideAction(cur)
}
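
As a concrete illustration of the branches above, here is a minimal sketch (not part of the vendored file) that feeds a slice expression and a union expression through the exported Parse function; the template strings and names are illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// "[1:3]" becomes an ArrayNode with start=1 and end=3 known, step unknown.
	p, err := jsonpath.Parse("slice-demo", "{.items[1:3]}")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Root)

	// "[0,1]" becomes a UnionNode holding one ListNode per comma-separated index.
	u, err := jsonpath.Parse("union-demo", "{.items[0,1]}")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Root)
}
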
// parseFilter scans filter inside array selection
func (p *Parser) parseFilter(cur *ListNode) error {
p.pos += len("[?(")
p.consumeText()
begin := false
end := false
var pair rune
Loop:
for {
r := p.next()
switch r {
case eof, '\n':
return fmt.Errorf("unterminated filter")
case '"', '\'':
if !begin {
// save the paired rune
begin = true
pair = r
continue
}
// only end when the paired rune is met
if p.input[p.pos-2] != '\\' && r == pair {
end = true
}
case ')':
// in rightParser below, quotes appear either zero times or once,
// and must be paired at the beginning and end
if begin == end {
break Loop
}
}
}
if p.next() != ']' {
return fmt.Errorf("unclosed array expect ]")
}
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
text := p.consumeText()
text = text[:len(text)-2]
value := reg.FindStringSubmatch(text)
if value == nil {
parser, err := parseAction("text", text)
if err != nil {
return err
}
cur.append(newFilter(parser.Root, newList(), "exists"))
} else {
leftParser, err := parseAction("left", value[1])
if err != nil {
return err
}
rightParser, err := parseAction("right", value[3])
if err != nil {
return err
}
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
}
return p.parseInsideAction(cur)
}
// parseQuote unquotes a string inside double or single quotes
func (p *Parser) parseQuote(cur *ListNode, end rune) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated quoted string")
case end:
// if it's not escaped, break the Loop
if p.input[p.pos-2] != '\\' {
break Loop
}
}
}
value := p.consumeText()
s, err := UnquoteExtend(value)
if err != nil {
return fmt.Errorf("unquote string %s error %v", value, err)
}
cur.append(newText(s))
return p.parseInsideAction(cur)
}
// parseField scans a field until a terminator
func (p *Parser) parseField(cur *ListNode) error {
p.consumeText()
for p.advance() {
}
value := p.consumeText()
if value == "*" {
cur.append(newWildcard())
} else {
cur.append(newField(strings.Replace(value, "\\", "", -1)))
}
return p.parseInsideAction(cur)
}
// advance scans until next non-escaped terminator
func (p *Parser) advance() bool {
r := p.next()
if r == '\\' {
p.next()
} else if isTerminator(r) {
p.backup()
return false
}
return true
}
// isTerminator reports whether the input is at a valid termination character to appear after an identifier.
func isTerminator(r rune) bool {
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isBool reports whether s is a boolean value.
func isBool(s string) bool {
return s == "true" || s == "false"
}
// UnquoteExtend is almost the same as strconv.Unquote(), but it supports parsing a single-quoted string
func UnquoteExtend(s string) (string, error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' && quote != '\'' {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
return s, nil
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
}
return string(buf), nil
}
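
A small usage sketch for UnquoteExtend, assuming only the exported API in this file; strconv.Unquote would reject the single-quoted input that UnquoteExtend accepts.

package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Single quotes are accepted here, unlike strconv.Unquote.
	s, err := jsonpath.UnquoteExtend(`'metadata.name'`)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // metadata.name

	// Double-quoted input still works, including escape sequences.
	d, err := jsonpath.UnquoteExtend(`"a\tb"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", d) // "a\tb" with a real tab in the middle
}
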
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}

6
vendor/k8s.io/client-go/util/keyutil/OWNERS generated vendored Normal file
View File

@@ -0,0 +1,6 @@
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

323
vendor/k8s.io/client-go/util/keyutil/key.go generated vendored Normal file
View File

@@ -0,0 +1,323 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package keyutil contains utilities for managing public/private key pairs.
package keyutil
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
const (
// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
ECPrivateKeyBlockType = "EC PRIVATE KEY"
// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
// PrivateKeyBlockType is a possible value for pem.Block.Type.
PrivateKeyBlockType = "PRIVATE KEY"
// PublicKeyBlockType is a possible value for pem.Block.Type.
PublicKeyBlockType = "PUBLIC KEY"
)
// MakeEllipticPrivateKeyPEM creates an ECDSA private key
func MakeEllipticPrivateKeyPEM() ([]byte, error) {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
if err != nil {
return nil, err
}
derBytes, err := x509.MarshalECPrivateKey(privateKey)
if err != nil {
return nil, err
}
privateKeyPemBlock := &pem.Block{
Type: ECPrivateKeyBlockType,
Bytes: derBytes,
}
return pem.EncodeToMemory(privateKeyPemBlock), nil
}
// WriteKey writes the pem-encoded key data to keyPath.
// The key file will be created with file mode 0600.
// If the key file already exists, it will be overwritten.
// The parent directory of the keyPath will be created as needed with file mode 0755.
func WriteKey(keyPath string, data []byte) error {
if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
return err
}
return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
}
// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
// can't find one, it will generate a new key and store it there.
func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
loadedData, err := ioutil.ReadFile(keyPath)
// Call verifyKeyData to ensure the file wasn't empty/corrupt.
if err == nil && verifyKeyData(loadedData) {
return loadedData, false, err
}
if !os.IsNotExist(err) {
return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
}
generatedData, err := MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, false, fmt.Errorf("error generating key: %v", err)
}
if err := WriteKey(keyPath, generatedData); err != nil {
return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
}
return generatedData, true, nil
}
// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
// a PEM encoded block or returns an error.
func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
switch t := privateKey.(type) {
case *ecdsa.PrivateKey:
derBytes, err := x509.MarshalECPrivateKey(t)
if err != nil {
return nil, err
}
block := &pem.Block{
Type: ECPrivateKeyBlockType,
Bytes: derBytes,
}
return pem.EncodeToMemory(block), nil
case *rsa.PrivateKey:
block := &pem.Block{
Type: RSAPrivateKeyBlockType,
Bytes: x509.MarshalPKCS1PrivateKey(t),
}
return pem.EncodeToMemory(block), nil
default:
return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
}
}
// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
// Returns an error if the file could not be read or if the private key could not be parsed.
func PrivateKeyFromFile(file string) (interface{}, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
key, err := ParsePrivateKeyPEM(data)
if err != nil {
return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
}
return key, nil
}
// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
// Reads public keys from both public and private key files.
func PublicKeysFromFile(file string) ([]interface{}, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
keys, err := ParsePublicKeysPEM(data)
if err != nil {
return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
}
return keys, nil
}
// verifyKeyData returns true if the provided data appears to be a valid private key.
func verifyKeyData(data []byte) bool {
if len(data) == 0 {
return false
}
_, err := ParsePrivateKeyPEM(data)
return err == nil
}
// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
var privateKeyPemBlock *pem.Block
for {
privateKeyPemBlock, keyData = pem.Decode(keyData)
if privateKeyPemBlock == nil {
break
}
switch privateKeyPemBlock.Type {
case ECPrivateKeyBlockType:
// ECDSA Private Key in ASN.1 format
if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case RSAPrivateKeyBlockType:
// RSA Private Key in PKCS#1 format
if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case PrivateKeyBlockType:
// RSA or ECDSA Private Key in unencrypted PKCS#8 format
if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
}
// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
// originally, only the first PEM block was parsed and expected to be a key block
}
// we read all the PEM blocks and didn't recognize one
return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
}
// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
// Reads public keys from both public and private key files.
func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
var block *pem.Block
keys := []interface{}{}
for {
// read the next block
block, keyData = pem.Decode(keyData)
if block == nil {
break
}
// test block against parsing functions
if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
// tolerate non-key PEM blocks for backwards compatibility
// originally, only the first PEM block was parsed and expected to be a key block
}
if len(keys) == 0 {
return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
}
return keys, nil
}
// parseRSAPublicKey parses a single RSA public key from the provided data
func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an RSA Public Key
var pubKey *rsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
}
return pubKey, nil
}
// parseRSAPrivateKey parses a single RSA private key from the provided data
func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
return nil, err
}
}
// Test if parsed key is an RSA Private Key
var privKey *rsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
}
return privKey, nil
}
// parseECPublicKey parses a single ECDSA public key from the provided data
func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an ECDSA Public Key
var pubKey *ecdsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
}
return pubKey, nil
}
// parseECPrivateKey parses a single ECDSA private key from the provided data
func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
return nil, err
}
// Test if parsed key is an ECDSA Private Key
var privKey *ecdsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
}
return privKey, nil
}
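
A minimal sketch tying the helpers above together: generate a P-256 key as PEM with MakeEllipticPrivateKeyPEM and round-trip it through ParsePrivateKeyPEM. Error handling is compressed for brevity.

package main

import (
	"crypto/ecdsa"
	"fmt"

	"k8s.io/client-go/util/keyutil"
)

func main() {
	// Generate a P-256 private key encoded as an "EC PRIVATE KEY" PEM block.
	pemBytes, err := keyutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		panic(err)
	}

	// Round-trip it through the PEM parser.
	key, err := keyutil.ParsePrivateKeyPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	if ecKey, ok := key.(*ecdsa.PrivateKey); ok {
		fmt.Println("parsed EC key on curve", ecKey.Curve.Params().Name) // P-256
	}
}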

238
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go generated vendored Normal file
View File

@@ -0,0 +1,238 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"math"
"sync"
"time"
"golang.org/x/time/rate"
)
type RateLimiter interface {
// When gets an item and decides how long that item should wait
When(item interface{}) time.Duration
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing
// or for success, we'll stop tracking it
Forget(item interface{})
// NumRequeues returns how many failures the item has had
NumRequeues(item interface{}) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item)
&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
type BucketRateLimiter struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
return r.Limiter.Reserve().Delay()
}
func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
return 0
}
func (r *BucketRateLimiter) Forget(item interface{}) {
}
// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
// dealing with max failures and expiration is up to the caller
type ItemExponentialFailureRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
baseDelay time.Duration
maxDelay time.Duration
}
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
return &ItemExponentialFailureRateLimiter{
failures: map[interface{}]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
func DefaultItemBasedRateLimiter() RateLimiter {
return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
}
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
exp := r.failures[item]
r.failures[item] = r.failures[item] + 1
// The backoff is capped such that 'calculated' value never overflows.
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
if backoff > math.MaxInt64 {
return r.maxDelay
}
calculated := time.Duration(backoff)
if calculated > r.maxDelay {
return r.maxDelay
}
return calculated
}
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
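
A sketch of the per-item backoff this limiter produces: each When call for the same item doubles the delay from baseDelay until Forget resets the failure count. The item key and delays below are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	r := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, time.Second)
	for i := 0; i < 4; i++ {
		fmt.Println(r.When("item")) // 5ms, 10ms, 20ms, 40ms
	}
	fmt.Println(r.NumRequeues("item")) // 4
	r.Forget("item")                   // resets the failure count for "item"
	fmt.Println(r.When("item"))        // back to 5ms
}
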
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type ItemFastSlowRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
maxFastAttempts int
fastDelay time.Duration
slowDelay time.Duration
}
var _ RateLimiter = &ItemFastSlowRateLimiter{}
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
return &ItemFastSlowRateLimiter{
failures: map[interface{}]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
r.failures[item] = r.failures[item] + 1
if r.failures[item] <= r.maxFastAttempts {
return r.fastDelay
}
return r.slowDelay
}
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type MaxOfRateLimiter struct {
limiters []RateLimiter
}
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
if curr > ret {
ret = curr
}
}
return ret
}
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
return &MaxOfRateLimiter{limiters: limiters}
}
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
if curr > ret {
ret = curr
}
}
return ret
}
func (r *MaxOfRateLimiter) Forget(item interface{}) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}
// WithMaxWaitRateLimiter wraps a RateLimiter and caps its delay at maxDelay, which avoids waiting too long
type WithMaxWaitRateLimiter struct {
limiter RateLimiter
maxDelay time.Duration
}
func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay}
}
func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
delay := w.limiter.When(item)
if delay > w.maxDelay {
return w.maxDelay
}
return delay
}
func (w WithMaxWaitRateLimiter) Forget(item interface{}) {
w.limiter.Forget(item)
}
func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int {
return w.limiter.NumRequeues(item)
}
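
A sketch composing the limiters in this file: DefaultControllerRateLimiter already takes the max of per-item backoff and a shared token bucket, and NewWithMaxWaitRateLimiter caps whatever delay that produces. The 10-second cap is an arbitrary example value.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item exponential backoff plus an overall 10 qps / burst 100 bucket,
	// with no single item ever asked to wait longer than 10 seconds.
	limiter := workqueue.NewWithMaxWaitRateLimiter(
		workqueue.DefaultControllerRateLimiter(),
		10*time.Second,
	)
	fmt.Println(limiter.When("key")) // first delay, bounded by the 10s cap
}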

282
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go generated vendored Normal file
View File

@@ -0,0 +1,282 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"container/heap"
"sync"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
Interface
// AddAfter adds an item to the workqueue after the indicated duration has passed
AddAfter(item interface{}, duration time.Duration)
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability.
// NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewNamedDelayingQueue instead.
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
}
// NewDelayingQueueWithCustomQueue constructs a new workqueue with the ability to
// inject a custom queue Interface instead of the default one
func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
return newDelayingQueue(clock.RealClock{}, q, name)
}
// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
func NewNamedDelayingQueue(name string) DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
}
// NewDelayingQueueWithCustomClock constructs a new named workqueue
// with the ability to inject a real or fake clock for testing purposes
func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
return newDelayingQueue(clock, NewNamed(name), name)
}
func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType {
ret := &delayingType{
Interface: q,
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor, 1000),
metrics: newRetryMetrics(name),
}
go ret.waitingLoop()
return ret
}
// delayingType wraps an Interface and provides delayed re-enqueuing
type delayingType struct {
Interface
// clock tracks time for delayed firing
clock clock.Clock
// stopCh lets us signal a shutdown to the waiting loop
stopCh chan struct{}
// stopOnce guarantees we only signal shutdown a single time
stopOnce sync.Once
// heartbeat ensures we wait no more than maxWait before firing
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
waitingForAddCh chan *waitFor
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
type waitFor struct {
data t
readyAt time.Time
// index in the priority queue (heap)
index int
}
// waitForPriorityQueue implements a priority queue for waitFor items.
//
// waitForPriorityQueue implements heap.Interface. The item occurring next in
// time (i.e., the item with the smallest readyAt) is at the root (index 0).
// Peek returns this minimum item at index 0. Pop returns the minimum item after
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
type waitForPriorityQueue []*waitFor
func (pq waitForPriorityQueue) Len() int {
return len(pq)
}
func (pq waitForPriorityQueue) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
func (pq waitForPriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
func (pq *waitForPriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*waitFor)
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
func (pq *waitForPriorityQueue) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
*pq = (*pq)[0:(n - 1)]
return item
}
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
func (pq waitForPriorityQueue) Peek() interface{} {
return pq[0]
}
// ShutDown stops the queue. After the queue drains, the returned shutdown bool
// on Get() will be true. This method may be invoked more than once.
func (q *delayingType) ShutDown() {
q.stopOnce.Do(func() {
q.Interface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
})
}
// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
}
q.metrics.retry()
// immediately add things with no delay
if duration <= 0 {
q.Add(item)
return
}
select {
case <-q.stopCh:
// unblock if ShutDown() is called
case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
defer utilruntime.HandleCrash()
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
waitingForQueue := &waitForPriorityQueue{}
heap.Init(waitingForQueue)
waitingEntryByData := map[t]*waitFor{}
for {
if q.Interface.ShuttingDown() {
return
}
now := q.clock.Now()
// Add ready entries
for waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor)
if entry.readyAt.After(now) {
break
}
entry = heap.Pop(waitingForQueue).(*waitFor)
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
// Set up a wait for the first item's readyAt (if one exists)
nextReadyAt := never
if waitingForQueue.Len() > 0 {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
entry := waitingForQueue.Peek().(*waitFor)
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
select {
case <-q.stopCh:
return
case <-q.heartbeat.C():
// continue the loop, which will add ready items
case <-nextReadyAt:
// continue the loop, which will add ready items
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
drained := false
for !drained {
select {
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
default:
drained = true
}
}
}
}
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
if existing.readyAt.After(entry.readyAt) {
existing.readyAt = entry.readyAt
heap.Fix(q, existing.index)
}
return
}
heap.Push(q, entry)
knownEntries[entry.data] = entry
}
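
A minimal sketch of the delayed-add behavior driven by waitingLoop above: the item only becomes visible to Get after the requested duration. The 200ms delay is illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	start := time.Now()
	q.AddAfter("item", 200*time.Millisecond)

	item, _ := q.Get() // blocks until the item becomes ready
	fmt.Println(item, "after", time.Since(start))
	q.Done(item)
}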

26
vendor/k8s.io/client-go/util/workqueue/doc.go generated vendored Normal file
View File

@@ -0,0 +1,26 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package workqueue provides a simple queue that supports the following
// features:
// * Fair: items processed in the order in which they are added.
// * Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// * Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// * Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"
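
A sketch of the produce/consume pattern these guarantees describe, using only the exported queue API; the keys and worker count are illustrative.

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	for _, key := range []string{"a", "b", "c"} {
		q.Add(key)
	}

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				item, shutdown := q.Get()
				if shutdown {
					return
				}
				fmt.Println("processing", item)
				q.Done(item) // allows the item to be re-queued if Added again meanwhile
			}
		}()
	}

	q.ShutDown() // Get still drains the remaining items, then reports shutdown
	wg.Wait()
}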

261
vendor/k8s.io/client-go/util/workqueue/metrics.go generated vendored Normal file
View File

@@ -0,0 +1,261 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/utils/clock"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type queueMetrics interface {
add(item t)
get(item t)
done(item t)
updateUnfinishedWork()
}
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Inc()
Dec()
}
// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
type SettableGaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
// HistogramMetric counts individual observations.
type HistogramMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
type defaultQueueMetrics struct {
clock clock.Clock
// current depth of a workqueue
depth GaugeMetric
// total number of adds handled by a workqueue
adds CounterMetric
// how long an item stays in a workqueue
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
addTimes map[t]time.Time
processingStartTimes map[t]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
func (m *defaultQueueMetrics) add(item t) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if _, exists := m.addTimes[item]; !exists {
m.addTimes[item] = m.clock.Now()
}
}
func (m *defaultQueueMetrics) get(item t) {
if m == nil {
return
}
m.depth.Dec()
m.processingStartTimes[item] = m.clock.Now()
if startTime, exists := m.addTimes[item]; exists {
m.latency.Observe(m.sinceInSeconds(startTime))
delete(m.addTimes, item)
}
}
func (m *defaultQueueMetrics) done(item t) {
if m == nil {
return
}
if startTime, exists := m.processingStartTimes[item]; exists {
m.workDuration.Observe(m.sinceInSeconds(startTime))
delete(m.processingStartTimes, item)
}
}
func (m *defaultQueueMetrics) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
var oldest float64
for _, t := range m.processingStartTimes {
age := m.sinceInSeconds(t)
total += age
if age > oldest {
oldest = age
}
}
m.unfinishedWorkSeconds.Set(total)
m.longestRunningProcessor.Set(oldest)
}
type noMetrics struct{}
func (noMetrics) add(item t) {}
func (noMetrics) get(item t) {}
func (noMetrics) done(item t) {}
func (noMetrics) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
type retryMetrics interface {
retry()
}
type defaultRetryMetrics struct {
retries CounterMetric
}
func (m *defaultRetryMetrics) retry() {
if m == nil {
return
}
m.retries.Inc()
}
// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
NewDepthMetric(name string) GaugeMetric
NewAddsMetric(name string) CounterMetric
NewLatencyMetric(name string) HistogramMetric
NewWorkDurationMetric(name string) HistogramMetric
NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
NewRetriesMetric(name string) CounterMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
var globalMetricsFactory = queueMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type queueMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
mp := f.metricsProvider
if len(name) == 0 || mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultQueueMetrics{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
addTimes: map[t]time.Time{},
processingStartTimes: map[t]time.Time{},
}
}
func newRetryMetrics(name string) retryMetrics {
var ret *defaultRetryMetrics
if len(name) == 0 {
return ret
}
return &defaultRetryMetrics{
retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name),
}
}
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
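
A sketch of wiring in a custom MetricsProvider; simpleProvider and its metric types are illustrative names, not part of client-go, and a real deployment would typically back them with prometheus collectors.

package main

import (
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

// gauge, counter, observer, settable, and simpleProvider are illustrative
// names for this sketch, not part of client-go.
type gauge struct{ v int64 }

func (g *gauge) Inc() { atomic.AddInt64(&g.v, 1) }
func (g *gauge) Dec() { atomic.AddInt64(&g.v, -1) }

type counter struct{ v int64 }

func (c *counter) Inc() { atomic.AddInt64(&c.v, 1) }

type observer struct{}

func (observer) Observe(float64) {}

type settable struct{}

func (settable) Set(float64) {}

type simpleProvider struct{}

func (simpleProvider) NewDepthMetric(name string) workqueue.GaugeMetric        { return &gauge{} }
func (simpleProvider) NewAddsMetric(name string) workqueue.CounterMetric       { return &counter{} }
func (simpleProvider) NewLatencyMetric(name string) workqueue.HistogramMetric  { return observer{} }
func (simpleProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
	return observer{}
}
func (simpleProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return settable{}
}
func (simpleProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return settable{}
}
func (simpleProvider) NewRetriesMetric(name string) workqueue.CounterMetric { return &counter{} }

func main() {
	workqueue.SetProvider(simpleProvider{}) // only the first call takes effect
	q := workqueue.NewNamed("example")      // unnamed queues skip metrics entirely
	q.Add("item")
	q.ShutDown()
}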

101
vendor/k8s.io/client-go/util/workqueue/parallelizer.go generated vendored Normal file
View File

@@ -0,0 +1,101 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"context"
"sync"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type DoWorkPieceFunc func(piece int)
type options struct {
chunkSize int
}
type Options func(*options)
// WithChunkSize allows setting chunks of work items for the workers, rather than
// processing one by one.
// It is recommended to use this option if the number of pieces is significantly
// higher than the number of workers and the work done for each item is small.
func WithChunkSize(c int) func(*options) {
return func(o *options) {
o.chunkSize = c
}
}
// ParallelizeUntil is a framework that allows for parallelizing N
// independent pieces of work until done or the context is canceled.
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
if pieces == 0 {
return
}
o := options{}
for _, opt := range opts {
opt(&o)
}
chunkSize := o.chunkSize
if chunkSize < 1 {
chunkSize = 1
}
chunks := ceilDiv(pieces, chunkSize)
toProcess := make(chan int, chunks)
for i := 0; i < chunks; i++ {
toProcess <- i
}
close(toProcess)
var stop <-chan struct{}
if ctx != nil {
stop = ctx.Done()
}
if chunks < workers {
workers = chunks
}
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer utilruntime.HandleCrash()
defer wg.Done()
for chunk := range toProcess {
start := chunk * chunkSize
end := start + chunkSize
if end > pieces {
end = pieces
}
for p := start; p < end; p++ {
select {
case <-stop:
return
default:
doWorkPiece(p)
}
}
}
}()
}
wg.Wait()
}
func ceilDiv(a, b int) int {
return (a + b - 1) / b
}
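
A minimal sketch of ParallelizeUntil summing a slice with 4 workers and chunks of 2; the data and sizes are illustrative.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	data := []int64{1, 2, 3, 4, 5, 6, 7, 8}
	var sum int64
	// 4 workers over 8 pieces, handed out in chunks of 2.
	workqueue.ParallelizeUntil(context.TODO(), 4, len(data), func(i int) {
		atomic.AddInt64(&sum, data[i])
	}, workqueue.WithChunkSize(2))
	fmt.Println(sum) // 36
}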

285
vendor/k8s.io/client-go/util/workqueue/queue.go generated vendored Normal file
View File

@@ -0,0 +1,285 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/utils/clock"
)
type Interface interface {
Add(item interface{})
Len() int
Get() (item interface{}, shutdown bool)
Done(item interface{})
ShutDown()
ShutDownWithDrain()
ShuttingDown() bool
}
// New constructs a new work queue (see the package comment).
func New() *Type {
return NewNamed("")
}
func NewNamed(name string) *Type {
rc := clock.RealClock{}
return newQueue(
rc,
globalMetricsFactory.newQueueMetrics(name, rc),
defaultUnfinishedWorkUpdatePeriod,
)
}
func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
t := &Type{
clock: c,
dirty: set{},
processing: set{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
}
// Don't start the goroutine for a type of noMetrics so we don't consume
// resources unnecessarily
if _, ok := metrics.(noMetrics); !ok {
go t.updateUnfinishedWorkLoop()
}
return t
}
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
type Type struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
queue []t
// dirty defines all of the items that need to be processed.
dirty set
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing set
cond *sync.Cond
shuttingDown bool
drain bool
metrics queueMetrics
unfinishedWorkUpdatePeriod time.Duration
clock clock.WithTicker
}
type empty struct{}
type t interface{}
type set map[t]empty
func (s set) has(item t) bool {
_, exists := s[item]
return exists
}
func (s set) insert(item t) {
s[item] = empty{}
}
func (s set) delete(item t) {
delete(s, item)
}
func (s set) len() int {
return len(s)
}
// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
return
}
q.metrics.add(item)
q.dirty.insert(item)
if q.processing.has(item) {
return
}
q.queue = append(q.queue, item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value; that can't be synchronized properly.
func (q *Type) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return len(q.queue)
}
// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
for len(q.queue) == 0 && !q.shuttingDown {
q.cond.Wait()
}
if len(q.queue) == 0 {
// We must be shutting down.
return nil, true
}
item = q.queue[0]
// The underlying array still exists and references this object, so the object will not be garbage collected unless we clear the reference.
q.queue[0] = nil
q.queue = q.queue[1:]
q.metrics.get(item)
q.processing.insert(item)
q.dirty.delete(item)
return item, false
}
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.metrics.done(item)
q.processing.delete(item)
if q.dirty.has(item) {
q.queue = append(q.queue, item)
q.cond.Signal()
} else if q.processing.len() == 0 {
q.cond.Signal()
}
}
// ShutDown will cause q to ignore all new items added to it and
// immediately instruct the worker goroutines to exit.
func (q *Type) ShutDown() {
q.setDrain(false)
q.shutdown()
}
// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
// as the worker goroutines have "drained", i.e. finished processing and called
// Done on all existing items in the queue, they will be instructed to exit and
// ShutDownWithDrain will return. Hence, a strict requirement for using this is
// that your workers must ensure that Done is called on all items in the queue
// once the shut down has been initiated; if that is not the case, this will
// block indefinitely. It is, however, safe to call ShutDown after having called
// ShutDownWithDrain, to force the queue shut down to terminate immediately
// without waiting for the drainage.
func (q *Type) ShutDownWithDrain() {
q.setDrain(true)
q.shutdown()
for q.isProcessing() && q.shouldDrain() {
q.waitForProcessing()
}
}
// isProcessing indicates if there are still items on the work queue being
// processed. It's used to drain the work queue on an eventual shutdown.
func (q *Type) isProcessing() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.processing.len() != 0
}
// waitForProcessing waits for the worker goroutines to finish processing items
// and call Done on them.
func (q *Type) waitForProcessing() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
// Ensure that we do not wait on a queue which is already empty, as that
// could result in waiting for Done to be called on items in an empty queue
// which has already been shut down, which will result in waiting
// indefinitely.
if q.processing.len() == 0 {
return
}
q.cond.Wait()
}
func (q *Type) setDrain(shouldDrain bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.drain = shouldDrain
}
func (q *Type) shouldDrain() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.drain
}
func (q *Type) shutdown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.shuttingDown = true
q.cond.Broadcast()
}
func (q *Type) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
func (q *Type) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
if !func() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if !q.shuttingDown {
q.metrics.updateUnfinishedWork()
return true
}
return false
}() {
return
}
}
}
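
A sketch of the drain semantics: ShutDownWithDrain blocks until workers have called Done on the items they are currently processing, so workers must keep calling Done or the call blocks forever. The sleeps are illustrative pacing, not part of the API contract.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("slow-item")

	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			time.Sleep(100 * time.Millisecond) // simulate work
			fmt.Println("finished", item)
			q.Done(item) // without this, ShutDownWithDrain blocks forever
		}
	}()

	time.Sleep(10 * time.Millisecond) // let the worker pick the item up
	q.ShutDownWithDrain()             // returns once in-flight items are Done
}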

71
vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go generated vendored Normal file
View File

@@ -0,0 +1,71 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
DelayingInterface
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
AddRateLimited(item interface{})
// Forget indicates that an item is finished being retried. It doesn't matter whether it's for a permanent failure
// or for success; we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
// still have to call `Done` on the queue.
Forget(item interface{})
// NumRequeues returns how many times the item was requeued
NumRequeues(item interface{}) int
}
// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewNamedRateLimitingQueue instead.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewDelayingQueue(),
rateLimiter: rateLimiter,
}
}
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewNamedDelayingQueue(name),
rateLimiter: rateLimiter,
}
}
// rateLimitingType wraps an Interface and provides rateLimited re-enqueuing
type rateLimitingType struct {
DelayingInterface
rateLimiter RateLimiter
}
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
func (q *rateLimitingType) AddRateLimited(item interface{}) {
q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
func (q *rateLimitingType) NumRequeues(item interface{}) int {
return q.rateLimiter.NumRequeues(item)
}
func (q *rateLimitingType) Forget(item interface{}) {
q.rateLimiter.Forget(item)
}
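
A sketch of the standard retry pattern this interface enables; process and the retry cap of 5 are hypothetical stand-ins.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// process is a hypothetical stand-in for real reconcile logic.
func process(item interface{}) error {
	fmt.Println("processing", item)
	return nil
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	defer q.ShutDown()
	q.Add("object-key")

	// In a real controller this body runs in a loop on several workers.
	item, shutdown := q.Get()
	if shutdown {
		return
	}
	if err := process(item); err != nil && q.NumRequeues(item) < 5 {
		q.AddRateLimited(item) // retry later, delayed by the rate limiter
	} else {
		q.Forget(item) // success (or giving up): stop tracking retries
	}
	q.Done(item)
}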