Mirror of https://github.com/TECHNOFAB11/zfs-localpv.git, synced 2025-12-12 14:30:12 +01:00
feat(modules): migrate to go modules and bump go version 1.14.4
- migrate to go modules
- bump go version to 1.14.4

Signed-off-by: prateekpandey14 <prateek.pandey@mayadata.io>
This commit is contained in:
parent f5ae3ff476
commit fa76b346a0
837 changed files with 104140 additions and 158314 deletions
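The bulk of those 837 files is vendor churn from the module migration; the interesting new file is the go.mod at the repository root. The sketch below is a hypothetical approximation, not the file from this commit: the module path follows the upstream OpenEBS repository, and the require list is illustrative. Note that go.mod of this era records only major.minor, so a 1.14.4 toolchain appears as go 1.14.

// go.mod, hypothetical sketch; the real file in the commit pins many more deps
module github.com/openebs/zfs-localpv

go 1.14

require (
	k8s.io/apimachinery v0.17.3 // assumed version, for illustration only
	k8s.io/client-go v0.17.3    // assumed version, for illustration only
)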
vendor/k8s.io/client-go/util/connrotation/connrotation.go (generated, vendored): 10 lines changed
@@ -77,11 +77,6 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
 
 	closable := &closableConn{Conn: conn}
 
-	// Start tracking the connection
-	d.mu.Lock()
-	d.conns[closable] = struct{}{}
-	d.mu.Unlock()
-
 	// When the connection is closed, remove it from the map. This will
 	// be no-op if the connection isn't in the map, e.g. if CloseAll()
 	// is called.
@@ -91,6 +86,11 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
 		d.mu.Unlock()
 	}
 
+	// Start tracking the connection
+	d.mu.Lock()
+	d.conns[closable] = struct{}{}
+	d.mu.Unlock()
+
 	return closable, nil
 }
 
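The two hunks together move connection tracking to after the onClose callback has been assigned. The likely motivation is a race: with the old ordering, a concurrent CloseAll() could close a just-registered connection whose onClose was still nil. A minimal usage sketch of this Dialer, assuming the client-go connrotation API as vendored here (the target address is illustrative):

package main

import (
	"context"
	"net"
	"time"

	"k8s.io/client-go/util/connrotation"
)

func main() {
	// Wrap the standard library dialer; every net.Conn it returns is tracked.
	base := &net.Dialer{Timeout: 5 * time.Second}
	dialer := connrotation.NewDialer(base.DialContext)

	conn, err := dialer.DialContext(context.Background(), "tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// On, say, a credential rotation, force every tracked connection closed
	// so callers reconnect with fresh state.
	dialer.CloseAll()
}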
vendor/k8s.io/client-go/util/flowcontrol/backoff.go (generated, vendored): 2 lines changed
@@ -99,7 +99,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
 	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		return false
 	}
-	return p.Clock.Now().Sub(eventTime) < entry.backoff
+	return p.Clock.Since(eventTime) < entry.backoff
 }
 
 // Returns True if time since lastupdate is less than the current backoff window.
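The only change here is replacing Clock.Now().Sub(eventTime) with the equivalent Clock.Since(eventTime). The same idiom exists in the standard library; a standalone illustration using stdlib time rather than the apimachinery clock interface:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now().Add(-3 * time.Second)

	// Equivalent ways to measure elapsed time; Since is the preferred shorthand.
	a := time.Now().Sub(start)
	b := time.Since(start)

	fmt.Println(a.Round(time.Second), b.Round(time.Second)) // 3s 3s
}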
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go (generated, vendored): 12 lines changed
@@ -93,17 +93,17 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
 
 		// encounter an end node, break the current block
 		if j.endRange > 0 && j.endRange <= j.inRange {
-			j.endRange -= 1
+			j.endRange--
 			break
 		}
 		// encounter a range node, start a range loop
 		if j.beginRange > 0 {
-			j.beginRange -= 1
-			j.inRange += 1
+			j.beginRange--
+			j.inRange++
 			for k, value := range results {
 				j.parser.Root.Nodes = nodes[i+1:]
 				if k == len(results)-1 {
-					j.inRange -= 1
+					j.inRange--
 				}
 				nextResults, err := j.FindResults(value.Interface())
 				if err != nil {
@@ -213,11 +213,11 @@ func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
 	switch node.Name {
 	case "range":
 		j.stack = append(j.stack, j.cur)
-		j.beginRange += 1
+		j.beginRange++
 		results = input
 	case "end":
 		if j.endRange < j.inRange { // inside a loop, break the current block
-			j.endRange += 1
+			j.endRange++
 			break
 		}
 		// the loop is about to end, pop value and continue the following execution
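These hunks are purely mechanical style fixes: Go's increment and decrement are statements, not expressions, and x++ / x-- is the form linters prefer over x += 1 / x -= 1, so the rewrite cannot change behavior. A trivial standalone illustration:

package main

import "fmt"

func main() {
	n := 1
	n += 1 // legal Go, but linters flag it in favor of the IncDec statement
	n++    // idiomatic; since ++ is a statement, `m := n++` does not compile
	fmt.Println(n) // 3
}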
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go (generated, vendored): 14 lines changed
@@ -18,6 +18,7 @@ package workqueue
 
 import (
 	"container/heap"
+	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -66,6 +67,8 @@ type delayingType struct {
 
 	// stopCh lets us signal a shutdown to the waiting loop
 	stopCh chan struct{}
+	// stopOnce guarantees we only signal shutdown a single time
+	stopOnce sync.Once
 
 	// heartbeat ensures we wait no more than maxWait before firing
 	heartbeat clock.Ticker
@@ -133,11 +136,14 @@ func (pq waitForPriorityQueue) Peek() interface{} {
 	return pq[0]
 }
 
-// ShutDown gives a way to shut off this queue
+// ShutDown stops the queue. After the queue drains, the returned shutdown bool
+// on Get() will be true. This method may be invoked more than once.
 func (q *delayingType) ShutDown() {
-	q.Interface.ShutDown()
-	close(q.stopCh)
-	q.heartbeat.Stop()
+	q.stopOnce.Do(func() {
+		q.Interface.ShutDown()
+		close(q.stopCh)
+		q.heartbeat.Stop()
+	})
 }
 
 // AddAfter adds the given item to the work queue after the given delay
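The pattern here is worth calling out: closing a channel twice panics, so guarding the close with sync.Once makes ShutDown safe to call from multiple goroutines or call sites, which is exactly what the new doc comment promises. A minimal standalone sketch of the same pattern (the stopper type and its names are illustrative, not from client-go):

package main

import (
	"fmt"
	"sync"
)

// stopper signals shutdown at most once, however many callers race to stop it.
type stopper struct {
	stopCh   chan struct{}
	stopOnce sync.Once
}

// ShutDown is idempotent: only the first call closes stopCh.
// A bare close(s.stopCh) on a second call would panic.
func (s *stopper) ShutDown() {
	s.stopOnce.Do(func() {
		close(s.stopCh)
	})
}

func main() {
	s := &stopper{stopCh: make(chan struct{})}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.ShutDown() // safe to call concurrently and repeatedly
		}()
	}
	wg.Wait()

	<-s.stopCh // channel is closed, so this receive does not block
	fmt.Println("shut down exactly once")
}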
vendor/k8s.io/client-go/util/workqueue/parallelizer.go (generated, vendored): 8 lines changed
@@ -25,14 +25,6 @@ import (
 
 type DoWorkPieceFunc func(piece int)
 
-// Parallelize is a very simple framework that allows for parallelizing
-// N independent pieces of work.
-//
-// Deprecated: Use ParallelizeUntil instead.
-func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
-	ParallelizeUntil(nil, workers, pieces, doWorkPiece)
-}
-
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
 func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
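With the deprecated Parallelize wrapper removed, callers migrate by passing a context to ParallelizeUntil directly. A usage sketch, where only the workqueue call is client-go API and the worker body and data are illustrative:

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	items := []string{"a", "b", "c", "d"}
	results := make([]string, len(items))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Old: workqueue.Parallelize(2, len(items), doWork)
	// New: pass a context so the remaining pieces can be canceled midway.
	workqueue.ParallelizeUntil(ctx, 2, len(items), func(piece int) {
		results[piece] = items[piece] + "-done" // each piece writes only its own slot
	})

	fmt.Println(results)
}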