Mirror of https://github.com/TECHNOFAB11/zfs-localpv.git, synced 2025-12-12 14:30:12 +01:00
refact(deps): bump k8s and client-go deps to version v0.20.2 (#294)
Signed-off-by: prateekpandey14 <prateek.pandey@mayadata.io>
Parent: 533e17a9aa
Commit: b1aa6ab51a

2196 changed files with 306727 additions and 251810 deletions
vendor/k8s.io/client-go/util/cert/cert.go (generated, vendored): 6 changes

@@ -28,7 +28,7 @@ import (
 	"io/ioutil"
 	"math/big"
 	"net"
-	"path"
+	"path/filepath"
 	"strings"
 	"time"
 
@@ -96,8 +96,8 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 	maxAge := time.Hour * 24 * 365 // one year self-signed certs
 
 	baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
-	certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
-	keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
+	certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
+	keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
 	if len(fixtureDirectory) > 0 {
 		cert, err := ioutil.ReadFile(certFixturePath)
 		if err == nil {
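Usage note, not part of the diff: a minimal sketch of calling the fixture-caching helper whose paths now go through filepath.Join. The host, IP, DNS name, fixture directory, and output file names below are placeholder values.

package main

import (
	"io/ioutil"
	"log"
	"net"

	"k8s.io/client-go/util/cert"
)

func main() {
	// With a non-empty fixture directory, the generated self-signed pair is
	// cached on disk as <dir>/<baseName>.crt and .key (built with filepath.Join).
	certPEM, keyPEM, err := cert.GenerateSelfSignedCertKeyWithFixtures(
		"localhost",
		[]net.IP{net.ParseIP("127.0.0.1")},
		[]string{"example.local"},
		"testdata", // placeholder fixture directory
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("server.crt", certPEM, 0600); err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("server.key", keyPEM, 0600); err != nil {
		log.Fatal(err)
	}
}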
vendor/k8s.io/client-go/util/cert/io.go (generated, vendored): 17 changes

@@ -72,7 +72,22 @@ func WriteCert(certPath string, data []byte) error {
 // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
 // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
 func NewPool(filename string) (*x509.CertPool, error) {
-	certs, err := CertsFromFile(filename)
+	pemBlock, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
+
+	pool, err := NewPoolFromBytes(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
+	}
+	return pool, nil
+}
+
+// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
+	certs, err := ParseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, err
+	}
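Usage note, not part of the diff: with the split above, callers that already hold PEM bytes can build a pool directly; the ca.crt path is a placeholder.

package main

import (
	"io/ioutil"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	pemBytes, err := ioutil.ReadFile("ca.crt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// NewPoolFromBytes is the new entry point; NewPool(filename) now wraps it.
	pool, err := cert.NewPoolFromBytes(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d CA subject(s)", len(pool.Subjects()))
}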
vendor/k8s.io/client-go/util/cert/pem.go (generated, vendored): 12 changes

@@ -17,6 +17,7 @@ limitations under the License.
 package cert
 
 import (
+	"bytes"
 	"crypto/x509"
 	"encoding/pem"
 	"errors"
@@ -59,3 +60,14 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
 	}
 	return certs, nil
 }
+
+// EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs.
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+	b := bytes.Buffer{}
+	for _, cert := range certs {
+		if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
+			return []byte{}, err
+		}
+	}
+	return b.Bytes(), nil
+}
vendor/k8s.io/client-go/util/cert/server_inspection.go (generated, vendored, new file): 102 additions

@@ -0,0 +1,102 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net/url"
+	"strings"
+)
+
+// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
+// state of particular servers. apiHost is "host:port"
+func GetClientCANames(apiHost string) ([]string, error) {
+	// when we run this the second time, we know which one we are expecting
+	acceptableCAs := []string{}
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
+		GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+			acceptableCAs = []string{}
+			for _, curr := range hello.AcceptableCAs {
+				acceptableCAs = append(acceptableCAs, string(curr))
+			}
+			return &tls.Certificate{}, nil
+		},
+	}
+
+	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+	if err != nil {
+		return nil, err
+	}
+	if err := conn.Close(); err != nil {
+		return nil, err
+	}
+
+	return acceptableCAs, nil
+}
+
+// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
+func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
+	apiserverURL, err := url.Parse(kubeConfigURL)
+	if err != nil {
+		return nil, err
+	}
+	return GetClientCANames(apiserverURL.Host)
+}
+
+// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
+// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
+func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true, // this is insecure so that we always get connected
+	}
+	// if a name is specified for SNI, set it.
+	if len(serverName) > 0 {
+		tlsConfig.ServerName = serverName
+	}
+
+	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err = conn.Close(); err != nil {
+		return nil, nil, fmt.Errorf("failed to close connection : %v", err)
+	}
+
+	peerCerts := conn.ConnectionState().PeerCertificates
+	peerCertBytes := [][]byte{}
+	for _, a := range peerCerts {
+		actualCert, err := EncodeCertificates(a)
+		if err != nil {
+			return nil, nil, err
+		}
+		peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
+	}
+
+	return peerCerts, peerCertBytes, err
+}
+
+// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
+func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
+	apiserverURL, err := url.Parse(kubeConfigURL)
+	if err != nil {
+		return nil, nil, err
+	}
+	return GetServingCertificates(apiserverURL.Host, serverName)
+}
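Usage note, not part of the diff: a sketch of inspecting a server's serving chain with the new helpers; the URL is a placeholder and the empty serverName means no SNI override.

package main

import (
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// The helper dials the host with InsecureSkipVerify (by design) just to
	// read the certificate chain the server presents.
	certs, pemBlocks, err := cert.GetServingCertificatesForURL("https://127.0.0.1:6443", "")
	if err != nil {
		log.Fatal(err)
	}
	for i, c := range certs {
		log.Printf("cert %d: subject=%q (%d PEM bytes)", i, c.Subject.String(), len(pemBlocks[i]))
	}
}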
vendor/k8s.io/client-go/util/flowcontrol/backoff.go (generated, vendored): 14 changes

@@ -30,7 +30,7 @@ type backoffEntry struct {
 }
 
 type Backoff struct {
-	sync.Mutex
+	sync.RWMutex
 	Clock           clock.Clock
 	defaultDuration time.Duration
 	maxDuration     time.Duration
@@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff {
 
 // Get the current backoff Duration
 func (p *Backoff) Get(id string) time.Duration {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	var delay time.Duration
 	entry, ok := p.perItemBackoff[id]
 	if ok {
@@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) {
 
 // Returns True if the elapsed time since eventTime is smaller than the current backoff window
 func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	entry, ok := p.perItemBackoff[id]
 	if !ok {
 		return false
@@ -104,8 +104,8 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
 
 // Returns True if time since lastupdate is less than the current backoff window.
 func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	entry, ok := p.perItemBackoff[id]
 	if !ok {
 		return false
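Usage note, not part of the diff: the switch to sync.RWMutex lets the read-only queries (Get, IsInBackOffSince, IsInBackOffSinceUpdate) run concurrently while Next still takes the write lock. A sketch with an arbitrary "node-1" key:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	b := flowcontrol.NewBackOff(time.Second, 32*time.Second)
	now := time.Now()
	b.Next("node-1", now) // write path: records a failure and grows the delay
	// Read paths, now under the shared read lock:
	fmt.Println(b.Get("node-1"))
	fmt.Println(b.IsInBackOffSince("node-1", now))
}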
vendor/k8s.io/client-go/util/flowcontrol/throttle.go (generated, vendored): 16 changes

@@ -17,6 +17,8 @@ limitations under the License.
 package flowcontrol
 
 import (
+	"context"
+	"errors"
 	"sync"
 	"time"
 
@@ -33,6 +35,8 @@ type RateLimiter interface {
 	Stop()
 	// QPS returns QPS of this rate limiter
 	QPS() float32
+	// Wait returns nil if a token is taken before the Context is done.
+	Wait(ctx context.Context) error
 }
 
 type tokenBucketRateLimiter struct {
@@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 {
 	return t.qps
 }
 
+func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+	return t.limiter.Wait(ctx)
+}
+
type fakeAlwaysRateLimiter struct{}
 
 func NewFakeAlwaysRateLimiter() RateLimiter {
@@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 {
 	return 1
 }
 
+func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
+	return nil
+}
+
 type fakeNeverRateLimiter struct {
 	wg sync.WaitGroup
 }
@@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() {
 func (t *fakeNeverRateLimiter) QPS() float32 {
 	return 1
 }
+
+func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
+	return errors.New("can not be accept")
+}
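Usage note, not part of the diff: the RateLimiter interface now supports context-aware blocking via Wait. A sketch with arbitrary QPS/burst values:

package main

import (
	"context"
	"log"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 5 QPS with a burst of 10; the values are arbitrary for this sketch.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Wait blocks until a token is available or the context is done.
	if err := limiter.Wait(ctx); err != nil {
		log.Fatalf("no token before deadline: %v", err)
	}
	log.Println("token acquired")
}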
vendor/k8s.io/client-go/util/homedir/homedir.go (generated, vendored): 75 changes

@@ -18,30 +18,75 @@ package homedir
 
 import (
 	"os"
+	"path/filepath"
 	"runtime"
 )
 
-// HomeDir returns the home directory for the current user
+// HomeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
 func HomeDir() string {
 	if runtime.GOOS == "windows" {
-
-		// First prefer the HOME environmental variable
-		if home := os.Getenv("HOME"); len(home) > 0 {
-			if _, err := os.Stat(home); err == nil {
-				return home
-			}
-		}
+		home := os.Getenv("HOME")
+		homeDriveHomePath := ""
 		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
-			homeDir := homeDrive + homePath
-			if _, err := os.Stat(homeDir); err == nil {
-				return homeDir
-			}
+			homeDriveHomePath = homeDrive + homePath
 		}
-		if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
-			if _, err := os.Stat(userProfile); err == nil {
-				return userProfile
-			}
-		}
+		userProfile := os.Getenv("USERPROFILE")
+
+		// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+		// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
+		for _, p := range []string{home, homeDriveHomePath, userProfile} {
+			if len(p) == 0 {
+				continue
+			}
+			if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
+				continue
+			}
+			return p
+		}
+
+		firstSetPath := ""
+		firstExistingPath := ""
+
+		// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
+		for _, p := range []string{home, userProfile, homeDriveHomePath} {
+			if len(p) == 0 {
+				continue
+			}
+			if len(firstSetPath) == 0 {
+				// remember the first path that is set
+				firstSetPath = p
+			}
+			info, err := os.Stat(p)
+			if err != nil {
+				continue
+			}
+			if len(firstExistingPath) == 0 {
+				// remember the first path that exists
+				firstExistingPath = p
+			}
+			if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
+				// return first path that is writeable
+				return p
+			}
+		}
+
+		// If none are writeable, return first location that exists
+		if len(firstExistingPath) > 0 {
+			return firstExistingPath
+		}
+
+		// If none exist, return first location that is set
+		if len(firstSetPath) > 0 {
+			return firstSetPath
+		}
+
+		// We've got nothing
+		return ""
 	}
 	return os.Getenv("HOME")
 }
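Usage note, not part of the diff: this helper is typically used to locate the default kubeconfig; the new Windows logic only changes which candidate directory gets returned.

package main

import (
	"fmt"
	"path/filepath"

	"k8s.io/client-go/util/homedir"
)

func main() {
	// Print the conventional default kubeconfig path for the current user.
	if home := homedir.HomeDir(); home != "" {
		fmt.Println(filepath.Join(home, ".kube", "config"))
	}
}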
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go (generated, vendored): 92 changes

@@ -18,6 +18,7 @@ package jsonpath
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"io"
 	"reflect"
@@ -29,13 +30,14 @@ import (
 type JSONPath struct {
 	name       string
 	parser     *Parser
-	stack      [][]reflect.Value // push and pop values in different scopes
-	cur        []reflect.Value   // current scope values
 	beginRange int
 	inRange    int
 	endRange   int
 
+	lastEndNode *Node
+
 	allowMissingKeys bool
+	outputJSON       bool
 }
 
 // New creates a new JSONPath with the given name.
@@ -81,12 +83,12 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
 		return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
 	}
 
-	j.cur = []reflect.Value{reflect.ValueOf(data)}
+	cur := []reflect.Value{reflect.ValueOf(data)}
 	nodes := j.parser.Root.Nodes
 	fullResult := [][]reflect.Value{}
 	for i := 0; i < len(nodes); i++ {
 		node := nodes[i]
-		results, err := j.walk(j.cur, node)
+		results, err := j.walk(cur, node)
 		if err != nil {
 			return nil, err
 		}
@@ -94,34 +96,90 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
 		// encounter an end node, break the current block
 		if j.endRange > 0 && j.endRange <= j.inRange {
 			j.endRange--
+			j.lastEndNode = &nodes[i]
 			break
 		}
 		// encounter a range node, start a range loop
 		if j.beginRange > 0 {
 			j.beginRange--
 			j.inRange++
-			for k, value := range results {
-				j.parser.Root.Nodes = nodes[i+1:]
-				if k == len(results)-1 {
-					j.inRange--
+			if len(results) > 0 {
+				for _, value := range results {
+					j.parser.Root.Nodes = nodes[i+1:]
+					nextResults, err := j.FindResults(value.Interface())
+					if err != nil {
+						return nil, err
+					}
+					fullResult = append(fullResult, nextResults...)
 				}
-				nextResults, err := j.FindResults(value.Interface())
+			} else {
+				// If the range has no results, we still need to process the nodes within the range
+				// so the position will advance to the end node
+				j.parser.Root.Nodes = nodes[i+1:]
+				_, err := j.FindResults(nil)
 				if err != nil {
 					return nil, err
 				}
-				fullResult = append(fullResult, nextResults...)
 			}
-			break
+			j.inRange--
+
+			// Fast forward to resume processing after the most recent end node that was encountered
+			for k := i + 1; k < len(nodes); k++ {
+				if &nodes[k] == j.lastEndNode {
+					i = k
+					break
+				}
+			}
+			continue
 		}
 		fullResult = append(fullResult, results)
 	}
 	return fullResult, nil
 }
 
+// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
+func (j *JSONPath) EnableJSONOutput(v bool) {
+	j.outputJSON = v
+}
+
 // PrintResults writes the results into writer
 func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
+	if j.outputJSON {
+		// convert the []reflect.Value to something that json
+		// will be able to marshal
+		r := make([]interface{}, 0, len(results))
+		for i := range results {
+			r = append(r, results[i].Interface())
+		}
+		results = []reflect.Value{reflect.ValueOf(r)}
+	}
 	for i, r := range results {
-		text, err := j.evalToText(r)
+		var text []byte
+		var err error
+		outputJSON := true
+		kind := r.Kind()
+		if kind == reflect.Interface {
+			kind = r.Elem().Kind()
+		}
+		switch kind {
+		case reflect.Map:
+		case reflect.Array:
+		case reflect.Slice:
+		case reflect.Struct:
+		default:
+			outputJSON = false
+		}
+		switch {
+		case outputJSON || j.outputJSON:
+			if j.outputJSON {
+				text, err = json.MarshalIndent(r.Interface(), "", "    ")
+				text = append(text, '\n')
+			} else {
+				text, err = json.Marshal(r.Interface())
+			}
+		default:
+			text, err = j.evalToText(r)
+		}
 		if err != nil {
 			return err
 		}
@@ -132,7 +190,9 @@ func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
 			return err
 		}
 	}
+
 	return nil
+
 }
 
 // walk visits tree rooted at the given node in DFS order
@@ -212,17 +272,11 @@ func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) (
 	results := []reflect.Value{}
 	switch node.Name {
 	case "range":
-		j.stack = append(j.stack, j.cur)
 		j.beginRange++
 		results = input
 	case "end":
-		if j.endRange < j.inRange { // inside a loop, break the current block
+		if j.inRange > 0 {
 			j.endRange++
+			break
 		}
-		// the loop is about to end, pop value and continue the following execution
-		if len(j.stack) > 0 {
-			j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
-		} else {
-			return results, fmt.Errorf("not in range, nothing to end")
-		}
+		return results, fmt.Errorf("not in range, nothing to end")
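Usage note, not part of the diff: a sketch of the reworked range/end handling and the new EnableJSONOutput knob; the template and data are made up for illustration.

package main

import (
	"log"
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "a"},
			map[string]interface{}{"name": "b"},
		},
	}
	jp := jsonpath.New("demo")
	// A range/end template; per the hunk above, a range over an empty list
	// now still advances past the matching end node.
	if err := jp.Parse(`{range .items[*]}{.name} {end}`); err != nil {
		log.Fatal(err)
	}
	jp.EnableJSONOutput(true) // new in this version: emit results as a JSON array
	if err := jp.Execute(os.Stdout, data); err != nil {
		log.Fatal(err)
	}
}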
vendor/k8s.io/client-go/util/jsonpath/parser.go (generated, vendored): 11 changes

@@ -37,7 +37,6 @@ type Parser struct {
 	Name  string
 	Root  *ListNode
 	input string
-	cur   *ListNode
 	pos   int
 	start int
 	width int
@@ -186,8 +185,7 @@ func (p *Parser) parseInsideAction(cur *ListNode) error {
 func (p *Parser) parseRightDelim(cur *ListNode) error {
 	p.pos += len(rightDelim)
 	p.consumeText()
-	cur = p.Root
-	return p.parseText(cur)
+	return p.parseText(p.Root)
 }
 
 // parseIdentifier scans build-in keywords, like "range" "end"
@@ -216,8 +214,11 @@ func (p *Parser) parseIdentifier(cur *ListNode) error {
 	return p.parseInsideAction(cur)
 }
 
-// parseRecursive scans the recursive desent operator ..
+// parseRecursive scans the recursive descent operator ..
 func (p *Parser) parseRecursive(cur *ListNode) error {
+	if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
+		return fmt.Errorf("invalid multiple recursive descent")
+	}
 	p.pos += len("..")
 	p.consumeText()
 	cur.append(newRecursive())
@@ -231,7 +232,7 @@ func (p *Parser) parseRecursive(cur *ListNode) error {
 func (p *Parser) parseNumber(cur *ListNode) error {
 	r := p.peek()
 	if r == '+' || r == '-' {
-		r = p.next()
+		p.next()
 	}
 	for {
 		r = p.next()
vendor/k8s.io/client-go/util/retry/OWNERS (generated, vendored, deleted): 4 deletions

@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- caesarxuchao
vendor/k8s.io/client-go/util/retry/util.go (generated, vendored, deleted): 79 deletions

@@ -1,79 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package retry
-
-import (
-	"time"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/util/wait"
-)
-
-// DefaultRetry is the recommended retry for a conflict where multiple clients
-// are making changes to the same resource.
-var DefaultRetry = wait.Backoff{
-	Steps:    5,
-	Duration: 10 * time.Millisecond,
-	Factor:   1.0,
-	Jitter:   0.1,
-}
-
-// DefaultBackoff is the recommended backoff for a conflict where a client
-// may be attempting to make an unrelated modification to a resource under
-// active management by one or more controllers.
-var DefaultBackoff = wait.Backoff{
-	Steps:    4,
-	Duration: 10 * time.Millisecond,
-	Factor:   5.0,
-	Jitter:   0.1,
-}
-
-// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting
-// write. Callers should preserve previous executions if they wish to retry changes. It performs an
-// exponential backoff.
-//
-//     var pod *api.Pod
-//     err := RetryOnConflict(DefaultBackoff, func() (err error) {
-//       pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
-//       return
-//     })
-//     if err != nil {
-//       // may be conflict if max retries were hit
-//       return err
-//     }
-//     ...
-//
-// TODO: Make Backoff an interface?
-func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
-	var lastConflictErr error
-	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
-		err := fn()
-		switch {
-		case err == nil:
-			return true, nil
-		case errors.IsConflict(err):
-			lastConflictErr = err
-			return false, nil
-		default:
-			return false, err
-		}
-	})
-	if err == wait.ErrWaitTimeout {
-		err = lastConflictErr
-	}
-	return err
-}
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go (generated, vendored): 46 changes

@@ -35,26 +35,37 @@ type DelayingInterface interface {
 
 // NewDelayingQueue constructs a new workqueue with delayed queuing ability
 func NewDelayingQueue() DelayingInterface {
-	return newDelayingQueue(clock.RealClock{}, "")
+	return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
+}
+
+// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
+// inject custom queue Interface instead of the default one
+func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
+	return newDelayingQueue(clock.RealClock{}, q, name)
 }
 
 // NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
 func NewNamedDelayingQueue(name string) DelayingInterface {
-	return newDelayingQueue(clock.RealClock{}, name)
+	return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
 }
 
-func newDelayingQueue(clock clock.Clock, name string) DelayingInterface {
+// NewDelayingQueueWithCustomClock constructs a new named workqueue
+// with ability to inject real or fake clock for testing purposes
+func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
+	return newDelayingQueue(clock, NewNamed(name), name)
+}
+
+func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
 	ret := &delayingType{
-		Interface:         NewNamed(name),
-		clock:             clock,
-		heartbeat:         clock.NewTicker(maxWait),
-		stopCh:            make(chan struct{}),
-		waitingForAddCh:   make(chan *waitFor, 1000),
-		metrics:           newRetryMetrics(name),
-		deprecatedMetrics: newDeprecatedRetryMetrics(name),
+		Interface:       q,
+		clock:           clock,
+		heartbeat:       clock.NewTicker(maxWait),
+		stopCh:          make(chan struct{}),
+		waitingForAddCh: make(chan *waitFor, 1000),
+		metrics:         newRetryMetrics(name),
 	}
 
 	go ret.waitingLoop()
 
 	return ret
 }
 
@@ -77,8 +88,7 @@ type delayingType struct {
 	waitingForAddCh chan *waitFor
 
 	// metrics counts the number of retries
-	metrics           retryMetrics
-	deprecatedMetrics retryMetrics
+	metrics retryMetrics
 }
 
 // waitFor holds the data to add and the time it should be added
@@ -154,7 +164,6 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
 	}
 
 	q.metrics.retry()
-	q.deprecatedMetrics.retry()
 
 	// immediately add things with no delay
 	if duration <= 0 {
@@ -181,6 +190,9 @@ func (q *delayingType) waitingLoop() {
 	// Make a placeholder channel to use when there are no items in our list
 	never := make(<-chan time.Time)
 
+	// Make a timer that expires when the item at the head of the waiting queue is ready
+	var nextReadyAtTimer clock.Timer
+
 	waitingForQueue := &waitForPriorityQueue{}
 	heap.Init(waitingForQueue)
 
@@ -208,8 +220,12 @@ func (q *delayingType) waitingLoop() {
 			// Set up a wait for the first item's readyAt (if one exists)
 			nextReadyAt := never
 			if waitingForQueue.Len() > 0 {
+				if nextReadyAtTimer != nil {
+					nextReadyAtTimer.Stop()
+				}
 				entry := waitingForQueue.Peek().(*waitFor)
-				nextReadyAt = q.clock.After(entry.readyAt.Sub(now))
+				nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
+				nextReadyAt = nextReadyAtTimer.C()
 			}
 
 			select {
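Usage note, not part of the diff: the new custom-clock constructor is aimed at tests; a sketch using apimachinery's fake clock (the queue name "test" and the durations are arbitrary).

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A fake clock lets a test fire AddAfter deterministically.
	fakeClock := clock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "test")
	defer q.ShutDown()

	q.AddAfter("item", 50*time.Millisecond)
	fakeClock.Step(100 * time.Millisecond) // advance past the delay
	item, _ := q.Get()                     // blocks until the item is ready
	fmt.Println(item)
	q.Done(item)
}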
vendor/k8s.io/client-go/util/workqueue/doc.go (generated, vendored): 2 changes

@@ -23,4 +23,4 @@ limitations under the License.
 // * Multiple consumers and producers. In particular, it is allowed for an
 //   item to be reenqueued while it is being processed.
 // * Shutdown notifications.
-package workqueue
+package workqueue // import "k8s.io/client-go/util/workqueue"
vendor/k8s.io/client-go/util/workqueue/metrics.go (generated, vendored): 95 changes

@@ -87,14 +87,6 @@ type defaultQueueMetrics struct {
 	// how long have current threads been working?
 	unfinishedWorkSeconds   SettableGaugeMetric
 	longestRunningProcessor SettableGaugeMetric
-
-	// TODO(danielqsj): Remove the following metrics, they are deprecated
-	deprecatedDepth                   GaugeMetric
-	deprecatedAdds                    CounterMetric
-	deprecatedLatency                 SummaryMetric
-	deprecatedWorkDuration            SummaryMetric
-	deprecatedUnfinishedWorkSeconds   SettableGaugeMetric
-	deprecatedLongestRunningProcessor SettableGaugeMetric
 }
 
 func (m *defaultQueueMetrics) add(item t) {
@@ -103,9 +95,7 @@ func (m *defaultQueueMetrics) add(item t) {
 	}
 
 	m.adds.Inc()
-	m.deprecatedAdds.Inc()
 	m.depth.Inc()
-	m.deprecatedDepth.Inc()
 	if _, exists := m.addTimes[item]; !exists {
 		m.addTimes[item] = m.clock.Now()
 	}
@@ -117,11 +107,9 @@ func (m *defaultQueueMetrics) get(item t) {
 	}
 
 	m.depth.Dec()
-	m.deprecatedDepth.Dec()
 	m.processingStartTimes[item] = m.clock.Now()
 	if startTime, exists := m.addTimes[item]; exists {
 		m.latency.Observe(m.sinceInSeconds(startTime))
-		m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime))
 		delete(m.addTimes, item)
 	}
 }
@@ -133,7 +121,6 @@ func (m *defaultQueueMetrics) done(item t) {
 
 	if startTime, exists := m.processingStartTimes[item]; exists {
 		m.workDuration.Observe(m.sinceInSeconds(startTime))
-		m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime))
 		delete(m.processingStartTimes, item)
 	}
 }
@@ -144,18 +131,14 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
 	var total float64
 	var oldest float64
 	for _, t := range m.processingStartTimes {
-		age := m.sinceInMicroseconds(t)
+		age := m.sinceInSeconds(t)
 		total += age
 		if age > oldest {
 			oldest = age
 		}
 	}
-	// Convert to seconds; microseconds is unhelpfully granular for this.
-	total /= 1000000
 	m.unfinishedWorkSeconds.Set(total)
-	m.deprecatedUnfinishedWorkSeconds.Set(total)
-	m.longestRunningProcessor.Set(oldest / 1000000)
-	m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds.
+	m.longestRunningProcessor.Set(oldest)
 }
 
 type noMetrics struct{}
@@ -165,11 +148,6 @@ func (noMetrics) get(item t) {}
 func (noMetrics) done(item t)           {}
 func (noMetrics) updateUnfinishedWork() {}
 
-// Gets the time since the specified start in microseconds.
-func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 {
-	return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
-}
-
 // Gets the time since the specified start in seconds.
 func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
 	return m.clock.Since(start).Seconds()
@@ -200,13 +178,6 @@ type MetricsProvider interface {
 	NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
 	NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
 	NewRetriesMetric(name string) CounterMetric
-	NewDeprecatedDepthMetric(name string) GaugeMetric
-	NewDeprecatedAddsMetric(name string) CounterMetric
-	NewDeprecatedLatencyMetric(name string) SummaryMetric
-	NewDeprecatedWorkDurationMetric(name string) SummaryMetric
-	NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
-	NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric
-	NewDeprecatedRetriesMetric(name string) CounterMetric
 }
 
 type noopMetricsProvider struct{}
@@ -239,34 +210,6 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
 	return noopMetric{}
 }
 
-func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric {
-	return noopMetric{}
-}
-
 var globalMetricsFactory = queueMetricsFactory{
 	metricsProvider: noopMetricsProvider{},
 }
@@ -289,21 +232,15 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
 		return noMetrics{}
 	}
 	return &defaultQueueMetrics{
-		clock:                             clock,
-		depth:                             mp.NewDepthMetric(name),
-		adds:                              mp.NewAddsMetric(name),
-		latency:                           mp.NewLatencyMetric(name),
-		workDuration:                      mp.NewWorkDurationMetric(name),
-		unfinishedWorkSeconds:             mp.NewUnfinishedWorkSecondsMetric(name),
-		longestRunningProcessor:           mp.NewLongestRunningProcessorSecondsMetric(name),
-		deprecatedDepth:                   mp.NewDeprecatedDepthMetric(name),
-		deprecatedAdds:                    mp.NewDeprecatedAddsMetric(name),
-		deprecatedLatency:                 mp.NewDeprecatedLatencyMetric(name),
-		deprecatedWorkDuration:            mp.NewDeprecatedWorkDurationMetric(name),
-		deprecatedUnfinishedWorkSeconds:   mp.NewDeprecatedUnfinishedWorkSecondsMetric(name),
-		deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name),
-		addTimes:                          map[t]time.Time{},
-		processingStartTimes:              map[t]time.Time{},
+		clock:                   clock,
+		depth:                   mp.NewDepthMetric(name),
+		adds:                    mp.NewAddsMetric(name),
+		latency:                 mp.NewLatencyMetric(name),
+		workDuration:            mp.NewWorkDurationMetric(name),
+		unfinishedWorkSeconds:   mp.NewUnfinishedWorkSecondsMetric(name),
+		longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
+		addTimes:                map[t]time.Time{},
+		processingStartTimes:    map[t]time.Time{},
 	}
 }
 
@@ -317,16 +254,6 @@ func newRetryMetrics(name string) retryMetrics {
 	}
 }
 
-func newDeprecatedRetryMetrics(name string) retryMetrics {
-	var ret *defaultRetryMetrics
-	if len(name) == 0 {
-		return ret
-	}
-	return &defaultRetryMetrics{
-		retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name),
-	}
-}
-
 // SetProvider sets the metrics provider for all subsequently created work
 // queues. Only the first call has an effect.
 func SetProvider(metricsProvider MetricsProvider) {
vendor/k8s.io/client-go/util/workqueue/parallelizer.go (generated, vendored): 68 changes

@@ -25,39 +25,77 @@ import (
 
 type DoWorkPieceFunc func(piece int)
 
+type options struct {
+	chunkSize int
+}
+
+type Options func(*options)
+
+// WithChunkSize allows to set chunks of work items to the workers, rather than
+// processing one by one.
+// It is recommended to use this option if the number of pieces significantly
+// higher than the number of workers and the work done for each item is small.
+func WithChunkSize(c int) func(*options) {
+	return func(o *options) {
+		o.chunkSize = c
+	}
+}
+
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
-func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
-	var stop <-chan struct{}
-	if ctx != nil {
-		stop = ctx.Done()
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
+	if pieces == 0 {
+		return
 	}
+	o := options{}
+	for _, opt := range opts {
+		opt(&o)
+	}
+	chunkSize := o.chunkSize
+	if chunkSize < 1 {
+		chunkSize = 1
+	}
 
-	toProcess := make(chan int, pieces)
-	for i := 0; i < pieces; i++ {
+	chunks := ceilDiv(pieces, chunkSize)
+	toProcess := make(chan int, chunks)
+	for i := 0; i < chunks; i++ {
 		toProcess <- i
 	}
 	close(toProcess)
 
-	if pieces < workers {
-		workers = pieces
+	var stop <-chan struct{}
+	if ctx != nil {
+		stop = ctx.Done()
+	}
+	if chunks < workers {
+		workers = chunks
 	}
-
 	wg := sync.WaitGroup{}
 	wg.Add(workers)
 	for i := 0; i < workers; i++ {
 		go func() {
			defer utilruntime.HandleCrash()
 			defer wg.Done()
-			for piece := range toProcess {
-				select {
-				case <-stop:
-					return
-				default:
-					doWorkPiece(piece)
+			for chunk := range toProcess {
+				start := chunk * chunkSize
+				end := start + chunkSize
+				if end > pieces {
+					end = pieces
+				}
+				for p := start; p < end; p++ {
+					select {
+					case <-stop:
+						return
+					default:
+						doWorkPiece(p)
+					}
 				}
 			}
 		}()
 	}
 	wg.Wait()
 }
+
+func ceilDiv(a, b int) int {
+	return (a + b - 1) / b
+}
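Usage note, not part of the diff: a sketch of the new chunked fan-out; the chunk size of 25 is arbitrary, and chunking pays off when pieces vastly outnumber workers and each piece is cheap.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	var sum int64
	pieces := 1000
	// Each worker pulls a chunk index, then runs doWorkPiece for the 25
	// piece indices inside that chunk.
	workqueue.ParallelizeUntil(context.Background(), 8, pieces, func(i int) {
		atomic.AddInt64(&sum, int64(i))
	}, workqueue.WithChunkSize(25))
	fmt.Println(sum) // 499500, the sum of 0..999
}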