Mirror of https://github.com/kubernetes/client-go.git, synced 2025-10-21 22:09:33 +00:00.
Published by bot (https://github.com/kubernetes/contrib/tree/master/mungegithub); copied from https://github.com/kubernetes/kubernetes.git, branch master, last commit a2b65f03bf83013d1af05c99b7aa22049ca63de3.
@@ -51,20 +51,21 @@ func init() {
}

var fileDescriptorGenerated = []byte{
    // 236 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x8f, 0xa1, 0x4e, 0x03, 0x41,
    0x10, 0x86, 0x67, 0x0d, 0x29, 0x27, 0x1b, 0x42, 0x48, 0xc5, 0x5e, 0x53, 0x45, 0x48, 0xd8, 0x09,
    0xa8, 0x06, 0xc9, 0x1b, 0x80, 0xc4, 0xdd, 0x95, 0x61, 0x99, 0x1c, 0xec, 0x6e, 0x76, 0x67, 0x05,
    0xae, 0x12, 0x59, 0x89, 0xec, 0xbd, 0x4d, 0x65, 0x25, 0x02, 0xc1, 0x1d, 0x2f, 0x42, 0x72, 0xa5,
    0x21, 0x21, 0xb8, 0xf9, 0xc4, 0x37, 0xf9, 0xfe, 0xe2, 0xb2, 0x99, 0x27, 0xc3, 0x1e, 0x9b, 0x5c,
    0x53, 0x74, 0x24, 0x94, 0x30, 0x34, 0x16, 0xab, 0xc0, 0x18, 0x29, 0xf9, 0x1c, 0x17, 0x84, 0x96,
    0x1c, 0xc5, 0x4a, 0xe8, 0xde, 0x84, 0xe8, 0xc5, 0x8f, 0x67, 0x3b, 0xc7, 0xfc, 0x3a, 0x26, 0x34,
    0xd6, 0x54, 0x81, 0xcd, 0xde, 0x99, 0x9c, 0x5b, 0x96, 0xc7, 0x5c, 0x9b, 0x85, 0x7f, 0x46, 0xeb,
    0xad, 0xc7, 0x41, 0xad, 0xf3, 0xc3, 0x40, 0x03, 0x0c, 0xd7, 0xee, 0xe5, 0xe4, 0xe2, 0xff, 0x8c,
    0x2c, 0xfc, 0x84, 0xec, 0x24, 0x49, 0xfc, 0x5b, 0x31, 0x9b, 0x17, 0xa3, 0x9b, 0x5c, 0x39, 0x61,
    0x79, 0x19, 0x1f, 0x17, 0x07, 0x49, 0x22, 0x3b, 0x7b, 0xa2, 0xa6, 0xea, 0xf4, 0xf0, 0xf6, 0x87,
    0xae, 0x8e, 0xde, 0xd6, 0x25, 0xbc, 0xb6, 0x25, 0xac, 0xda, 0x12, 0xd6, 0x6d, 0x09, 0xcb, 0x8f,
    0x29, 0x5c, 0x9f, 0x6d, 0x3a, 0x0d, 0xdb, 0x4e, 0xc3, 0x7b, 0xa7, 0x61, 0xd9, 0x6b, 0xb5, 0xe9,
    0xb5, 0xda, 0xf6, 0x5a, 0x7d, 0xf6, 0x5a, 0xad, 0xbe, 0x34, 0xdc, 0x8d, 0xf6, 0x3b, 0xbe, 0x03,
    0x00, 0x00, 0xff, 0xff, 0x90, 0x1c, 0x7f, 0xff, 0x20, 0x01, 0x00, 0x00,
    // 245 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
    0x10, 0x86, 0xed, 0x05, 0x95, 0x8c, 0x15, 0x42, 0xa8, 0x83, 0x53, 0x75, 0x42, 0x48, 0xd8, 0x12,
    0x2c, 0x15, 0x23, 0x6f, 0x00, 0x23, 0x9b, 0x13, 0x0e, 0xf7, 0x14, 0x6a, 0x5b, 0xf6, 0x79, 0xe8,
    0xd6, 0x91, 0xb1, 0x23, 0x63, 0xf3, 0x36, 0x1d, 0x3b, 0x32, 0x30, 0x90, 0xf0, 0x22, 0x48, 0x6e,
    0x23, 0xa4, 0x6e, 0xf7, 0x0d, 0xdf, 0xe9, 0xfb, 0x8b, 0xbb, 0x66, 0x1e, 0x25, 0x3a, 0xd5, 0xa4,
    0x0a, 0x82, 0x05, 0x82, 0xa8, 0x7c, 0x63, 0x94, 0xf6, 0xa8, 0x02, 0x44, 0x97, 0x42, 0x0d, 0xca,
    0x80, 0x85, 0xa0, 0x09, 0x5e, 0xa5, 0x0f, 0x8e, 0xdc, 0x78, 0x76, 0x70, 0xe4, 0xbf, 0x23, 0x7d,
    0x63, 0xa4, 0xf6, 0x28, 0x07, 0x67, 0x72, 0x6b, 0x90, 0x16, 0xa9, 0x92, 0xb5, 0x5b, 0x2a, 0xe3,
    0x8c, 0x53, 0x59, 0xad, 0xd2, 0x5b, 0xa6, 0x0c, 0xf9, 0x3a, 0xbc, 0x9c, 0xdc, 0x1f, 0x33, 0xb4,
    0xc7, 0xa5, 0xae, 0x17, 0x68, 0x21, 0xac, 0x72, 0x48, 0x22, 0x7c, 0x57, 0x68, 0x29, 0x52, 0x38,
    0xed, 0x98, 0xcd, 0x8b, 0xd1, 0x53, 0xd2, 0x96, 0x90, 0x56, 0xe3, 0xcb, 0xe2, 0x2c, 0x52, 0x40,
    0x6b, 0xae, 0xf8, 0x94, 0x5f, 0x9f, 0x3f, 0x1f, 0xe9, 0xe1, 0xe2, 0x73, 0x5b, 0xb2, 0x8f, 0xb6,
    0x64, 0x9b, 0xb6, 0x64, 0xdb, 0xb6, 0x64, 0xeb, 0xef, 0x29, 0x7b, 0xbc, 0xd9, 0x75, 0x82, 0xed,
    0x3b, 0xc1, 0xbe, 0x3a, 0xc1, 0xd6, 0xbd, 0xe0, 0xbb, 0x5e, 0xf0, 0x7d, 0x2f, 0xf8, 0x4f, 0x2f,
    0xf8, 0xe6, 0x57, 0xb0, 0x97, 0xd1, 0xb0, 0xe4, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x55, 0x28, 0xd5,
    0x78, 0x22, 0x01, 0x00, 0x00,
}
@@ -21,7 +21,7 @@ syntax = 'proto2';

package k8s.io.kubernetes.pkg.api.resource;

import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "resource";

501  pkg/third_party/forked/golang/json/fields.go (vendored)
@@ -1,501 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package json is forked from the Go standard library to enable us to find the
// field of a struct that a given JSON key maps to.
package json

import (
    "bytes"
    "fmt"
    "reflect"
    "sort"
    "strings"
    "sync"
    "unicode"
    "unicode/utf8"
)

// Finds the patchStrategy and patchMergeKey struct tag fields on a given
// struct field given the struct type and the JSON name of the field.
// TODO: fix the returned errors to be introspectable.
func LookupPatchMetadata(t reflect.Type, jsonField string) (reflect.Type, string, string, error) {
    if t.Kind() == reflect.Map {
        return t.Elem(), "", "", nil
    }
    if t.Kind() != reflect.Struct {
        return nil, "", "", fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s",
            t.Kind().String())
    }
    jf := []byte(jsonField)
    // Find the field that the JSON library would use.
    var f *field
    fields := cachedTypeFields(t)
    for i := range fields {
        ff := &fields[i]
        if bytes.Equal(ff.nameBytes, jf) {
            f = ff
            break
        }
        // Do case-insensitive comparison.
        if f == nil && ff.equalFold(ff.nameBytes, jf) {
            f = ff
        }
    }
    if f != nil {
        // Find the reflect.Value of the most preferential struct field.
        tjf := t.Field(f.index[0])
        // we must navigate down all the anonymously included structs in the chain
        for i := 1; i < len(f.index); i++ {
            tjf = tjf.Type.Field(f.index[i])
        }
        patchStrategy := tjf.Tag.Get("patchStrategy")
        patchMergeKey := tjf.Tag.Get("patchMergeKey")
        return tjf.Type, patchStrategy, patchMergeKey, nil
    }
    return nil, "", "", fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
}

// A field represents a single field found in a struct.
type field struct {
    name      string
    nameBytes []byte                 // []byte(name)
    equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

    tag bool
    // index is the sequence of indexes from the containing type fields to this field.
    // it is a slice because anonymous structs will need multiple navigation steps to correctly
    // resolve the proper fields
    index     []int
    typ       reflect.Type
    omitEmpty bool
    quoted    bool
}

func (f field) String() string {
    return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
}

func fillField(f field) field {
    f.nameBytes = []byte(f.name)
    f.equalFold = foldFunc(f.nameBytes)
    return f
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
    if x[i].name != x[j].name {
        return x[i].name < x[j].name
    }
    if len(x[i].index) != len(x[j].index) {
        return len(x[i].index) < len(x[j].index)
    }
    if x[i].tag != x[j].tag {
        return x[i].tag
    }
    return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
    for k, xik := range x[i].index {
        if k >= len(x[j].index) {
            return false
        }
        if xik != x[j].index[k] {
            return xik < x[j].index[k]
        }
    }
    return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
    // Anonymous fields to explore at the current level and the next.
    current := []field{}
    next := []field{{typ: t}}

    // Count of queued names for current level and the next.
    count := map[reflect.Type]int{}
    nextCount := map[reflect.Type]int{}

    // Types already visited at an earlier level.
    visited := map[reflect.Type]bool{}

    // Fields found.
    var fields []field

    for len(next) > 0 {
        current, next = next, current[:0]
        count, nextCount = nextCount, map[reflect.Type]int{}

        for _, f := range current {
            if visited[f.typ] {
                continue
            }
            visited[f.typ] = true

            // Scan f.typ for fields to include.
            for i := 0; i < f.typ.NumField(); i++ {
                sf := f.typ.Field(i)
                if sf.PkgPath != "" { // unexported
                    continue
                }
                tag := sf.Tag.Get("json")
                if tag == "-" {
                    continue
                }
                name, opts := parseTag(tag)
                if !isValidTag(name) {
                    name = ""
                }
                index := make([]int, len(f.index)+1)
                copy(index, f.index)
                index[len(f.index)] = i

                ft := sf.Type
                if ft.Name() == "" && ft.Kind() == reflect.Ptr {
                    // Follow pointer.
                    ft = ft.Elem()
                }

                // Record found field and index sequence.
                if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
                    tagged := name != ""
                    if name == "" {
                        name = sf.Name
                    }
                    fields = append(fields, fillField(field{
                        name:      name,
                        tag:       tagged,
                        index:     index,
                        typ:       ft,
                        omitEmpty: opts.Contains("omitempty"),
                        quoted:    opts.Contains("string"),
                    }))
                    if count[f.typ] > 1 {
                        // If there were multiple instances, add a second,
                        // so that the annihilation code will see a duplicate.
                        // It only cares about the distinction between 1 or 2,
                        // so don't bother generating any more copies.
                        fields = append(fields, fields[len(fields)-1])
                    }
                    continue
                }

                // Record new anonymous struct to explore in next round.
                nextCount[ft]++
                if nextCount[ft] == 1 {
                    next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
                }
            }
        }
    }

    sort.Sort(byName(fields))

    // Delete all fields that are hidden by the Go rules for embedded fields,
    // except that fields with JSON tags are promoted.

    // The fields are sorted in primary order of name, secondary order
    // of field index length. Loop over names; for each name, delete
    // hidden fields by choosing the one dominant field that survives.
    out := fields[:0]
    for advance, i := 0, 0; i < len(fields); i += advance {
        // One iteration per name.
        // Find the sequence of fields with the name of this first field.
        fi := fields[i]
        name := fi.name
        for advance = 1; i+advance < len(fields); advance++ {
            fj := fields[i+advance]
            if fj.name != name {
                break
            }
        }
        if advance == 1 { // Only one field with this name
            out = append(out, fi)
            continue
        }
        dominant, ok := dominantField(fields[i : i+advance])
        if ok {
            out = append(out, dominant)
        }
    }

    fields = out
    sort.Sort(byIndex(fields))

    return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
    // The fields are sorted in increasing index-length order. The winner
    // must therefore be one with the shortest index length. Drop all
    // longer entries, which is easy: just truncate the slice.
    length := len(fields[0].index)
    tagged := -1 // Index of first tagged field.
    for i, f := range fields {
        if len(f.index) > length {
            fields = fields[:i]
            break
        }
        if f.tag {
            if tagged >= 0 {
                // Multiple tagged fields at the same level: conflict.
                // Return no field.
                return field{}, false
            }
            tagged = i
        }
    }
    if tagged >= 0 {
        return fields[tagged], true
    }
    // All remaining fields have the same length. If there's more than one,
    // we have a conflict (two fields named "X" at the same level) and we
    // return no field.
    if len(fields) > 1 {
        return field{}, false
    }
    return fields[0], true
}

var fieldCache struct {
    sync.RWMutex
    m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
    fieldCache.RLock()
    f := fieldCache.m[t]
    fieldCache.RUnlock()
    if f != nil {
        return f
    }

    // Compute fields without lock.
    // Might duplicate effort but won't hold other computations back.
    f = typeFields(t)
    if f == nil {
        f = []field{}
    }

    fieldCache.Lock()
    if fieldCache.m == nil {
        fieldCache.m = map[reflect.Type][]field{}
    }
    fieldCache.m[t] = f
    fieldCache.Unlock()
    return f
}

func isValidTag(s string) bool {
    if s == "" {
        return false
    }
    for _, c := range s {
        switch {
        case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
            // Backslash and quote chars are reserved, but
            // otherwise any punctuation chars are allowed
            // in a tag name.
        default:
            if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
                return false
            }
        }
    }
    return true
}

const (
    caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
    kelvin       = '\u212a'
    smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//  * S maps to s and to U+017F 'ſ' Latin small letter long s
//  * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
    nonLetter := false
    special := false // special letter
    for _, b := range s {
        if b >= utf8.RuneSelf {
            return bytes.EqualFold
        }
        upper := b & caseMask
        if upper < 'A' || upper > 'Z' {
            nonLetter = true
        } else if upper == 'K' || upper == 'S' {
            // See above for why these letters are special.
            special = true
        }
    }
    if special {
        return equalFoldRight
    }
    if nonLetter {
        return asciiEqualFold
    }
    return simpleLetterEqualFold
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
    for _, sb := range s {
        if len(t) == 0 {
            return false
        }
        tb := t[0]
        if tb < utf8.RuneSelf {
            if sb != tb {
                sbUpper := sb & caseMask
                if 'A' <= sbUpper && sbUpper <= 'Z' {
                    if sbUpper != tb&caseMask {
                        return false
                    }
                } else {
                    return false
                }
            }
            t = t[1:]
            continue
        }
        // sb is ASCII and t is not. t must be either kelvin
        // sign or long s; sb must be s, S, k, or K.
        tr, size := utf8.DecodeRune(t)
        switch sb {
        case 's', 'S':
            if tr != smallLongEss {
                return false
            }
        case 'k', 'K':
            if tr != kelvin {
                return false
            }
        default:
            return false
        }
        t = t[size:]

    }
    if len(t) > 0 {
        return false
    }
    return true
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
    if len(s) != len(t) {
        return false
    }
    for i, sb := range s {
        tb := t[i]
        if sb == tb {
            continue
        }
        if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
            if sb&caseMask != tb&caseMask {
                return false
            }
        } else {
            return false
        }
    }
    return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
    if len(s) != len(t) {
        return false
    }
    for i, b := range s {
        if b&caseMask != t[i]&caseMask {
            return false
        }
    }
    return true
}

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
    if idx := strings.Index(tag, ","); idx != -1 {
        return tag[:idx], tagOptions(tag[idx+1:])
    }
    return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
    if len(o) == 0 {
        return false
    }
    s := string(o)
    for s != "" {
        var next string
        i := strings.Index(s, ",")
        if i >= 0 {
            s, next = s[:i], s[i+1:]
        }
        if s == optionName {
            return true
        }
        s = next
    }
    return false
}
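Note (added for illustration, not part of the commit): a minimal sketch of how LookupPatchMetadata from the forked json package above is typically used. The PodSpec struct, its tags, and the import path are assumptions made for the example.

package main

import (
    "fmt"
    "reflect"

    // Assumed import path: this sync moves the fork out of client-go's pkg/third_party.
    forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
)

// Hypothetical type carrying Kubernetes-style patch tags.
type PodSpec struct {
    Containers []string `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
    // Look up the Go field that the JSON key "containers" maps to and read its patch tags.
    typ, strategy, mergeKey, err := forkedjson.LookupPatchMetadata(reflect.TypeOf(PodSpec{}), "containers")
    if err != nil {
        panic(err)
    }
    fmt.Println(typ, strategy, mergeKey) // []string merge name
}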
27  pkg/third_party/forked/golang/netutil/addr.go (vendored)
@@ -1,27 +0,0 @@
package netutil

import (
    "net/url"
    "strings"
)

// FROM: http://golang.org/src/net/http/client.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }

// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
    "http":  "80",
    "https": "443",
}

// FROM: http://golang.org/src/net/http/transport.go
// canonicalAddr returns url.Host but always with a ":port" suffix
func CanonicalAddr(url *url.URL) string {
    addr := url.Host
    if !hasPort(addr) {
        return addr + ":" + portMap[url.Scheme]
    }
    return addr
}
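Note (added for illustration, not part of the commit): what CanonicalAddr returns for URLs with and without an explicit port. The import path is the one the SPDY round tripper hunk later in this diff switches to.

package main

import (
    "fmt"
    "net/url"

    "k8s.io/client-go/third_party/forked/golang/netutil"
)

func main() {
    u1, _ := url.Parse("https://example.com/api")
    u2, _ := url.Parse("https://example.com:8443/api")
    fmt.Println(netutil.CanonicalAddr(u1)) // example.com:443 (port derived from the scheme)
    fmt.Println(netutil.CanonicalAddr(u2)) // example.com:8443 (port already present, returned as-is)
}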
94  pkg/third_party/forked/golang/template/exec.go (vendored)
@@ -1,94 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions indirect and printableValue
//are exported as public functions.
package template

import (
    "fmt"
    "reflect"
)

var Indirect = indirect
var PrintableValue = printableValue

var (
    errorType       = reflect.TypeOf((*error)(nil)).Elem()
    fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)

// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
    for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
        if v.IsNil() {
            return v, true
        }
        if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
            break
        }
    }
    return v, false
}

// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (interface{}, bool) {
    if v.Kind() == reflect.Ptr {
        v, _ = indirect(v) // fmt.Fprint handles nil.
    }
    if !v.IsValid() {
        return "<no value>", true
    }

    if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
        if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
            v = v.Addr()
        } else {
            switch v.Kind() {
            case reflect.Chan, reflect.Func:
                return nil, false
            }
        }
    }
    return v.Interface(), true
}

// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
    switch typ.Kind() {
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
        return true
    }
    return false
}

// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value.
func isTrue(val reflect.Value) (truth, ok bool) {
    if !val.IsValid() {
        // Something like var x interface{}, never set. It's a form of nil.
        return false, true
    }
    switch val.Kind() {
    case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
        truth = val.Len() > 0
    case reflect.Bool:
        truth = val.Bool()
    case reflect.Complex64, reflect.Complex128:
        truth = val.Complex() != 0
    case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
        truth = !val.IsNil()
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        truth = val.Int() != 0
    case reflect.Float32, reflect.Float64:
        truth = val.Float() != 0
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        truth = val.Uint() != 0
    case reflect.Struct:
        truth = true // Struct values are always true.
    default:
        return
    }
    return truth, true
}
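Note (added for illustration, not part of the commit): a sketch of the exported Indirect and PrintableValue helpers from the forked template package. The import path is an assumption; the fork may live under a different third_party directory in your tree.

package main

import (
    "fmt"
    "reflect"

    "k8s.io/client-go/third_party/forked/golang/template" // assumed path of the fork
)

func main() {
    s := "hello"
    p := &s

    // Indirect walks through the pointer to the underlying string value.
    v, isNil := template.Indirect(reflect.ValueOf(p))
    fmt.Println(v.Interface(), isNil) // hello false

    // PrintableValue yields the value that fmt should print for p.
    pv, ok := template.PrintableValue(reflect.ValueOf(p))
    fmt.Println(pv, ok) // hello true
}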
599  pkg/third_party/forked/golang/template/funcs.go (vendored)
@@ -1,599 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions eq, ge, gt, le, lt, and ne
//are exported as public functions.
package template

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "net/url"
    "reflect"
    "strings"
    "unicode"
    "unicode/utf8"
)

var Equal = eq
var GreaterEqual = ge
var Greater = gt
var LessEqual = le
var Less = lt
var NotEqual = ne

// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}

var builtins = FuncMap{
    "and":      and,
    "call":     call,
    "html":     HTMLEscaper,
    "index":    index,
    "js":       JSEscaper,
    "len":      length,
    "not":      not,
    "or":       or,
    "print":    fmt.Sprint,
    "printf":   fmt.Sprintf,
    "println":  fmt.Sprintln,
    "urlquery": URLQueryEscaper,

    // Comparisons
    "eq": eq, // ==
    "ge": ge, // >=
    "gt": gt, // >
    "le": le, // <=
    "lt": lt, // <
    "ne": ne, // !=
}

var builtinFuncs = createValueFuncs(builtins)

// createValueFuncs turns a FuncMap into a map[string]reflect.Value
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
    m := make(map[string]reflect.Value)
    addValueFuncs(m, funcMap)
    return m
}

// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
    for name, fn := range in {
        v := reflect.ValueOf(fn)
        if v.Kind() != reflect.Func {
            panic("value for " + name + " not a function")
        }
        if !goodFunc(v.Type()) {
            panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
        }
        out[name] = v
    }
}

// addFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
    for name, fn := range in {
        out[name] = fn
    }
}

// goodFunc checks that the function or method has the right result signature.
func goodFunc(typ reflect.Type) bool {
    // We allow functions with 1 result or 2 results where the second is an error.
    switch {
    case typ.NumOut() == 1:
        return true
    case typ.NumOut() == 2 && typ.Out(1) == errorType:
        return true
    }
    return false
}

// findFunction looks for a function in the template, and global map.
func findFunction(name string) (reflect.Value, bool) {
    if fn := builtinFuncs[name]; fn.IsValid() {
        return fn, true
    }
    return reflect.Value{}, false
}

// Indexing.

// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
    v := reflect.ValueOf(item)
    for _, i := range indices {
        index := reflect.ValueOf(i)
        var isNil bool
        if v, isNil = indirect(v); isNil {
            return nil, fmt.Errorf("index of nil pointer")
        }
        switch v.Kind() {
        case reflect.Array, reflect.Slice, reflect.String:
            var x int64
            switch index.Kind() {
            case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
                x = index.Int()
            case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
                x = int64(index.Uint())
            default:
                return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
            }
            if x < 0 || x >= int64(v.Len()) {
                return nil, fmt.Errorf("index out of range: %d", x)
            }
            v = v.Index(int(x))
        case reflect.Map:
            if !index.IsValid() {
                index = reflect.Zero(v.Type().Key())
            }
            if !index.Type().AssignableTo(v.Type().Key()) {
                return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
            }
            if x := v.MapIndex(index); x.IsValid() {
                v = x
            } else {
                v = reflect.Zero(v.Type().Elem())
            }
        default:
            return nil, fmt.Errorf("can't index item of type %s", v.Type())
        }
    }
    return v.Interface(), nil
}

// Length

// length returns the length of the item, with an error if it has no defined length.
func length(item interface{}) (int, error) {
    v, isNil := indirect(reflect.ValueOf(item))
    if isNil {
        return 0, fmt.Errorf("len of nil pointer")
    }
    switch v.Kind() {
    case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
        return v.Len(), nil
    }
    return 0, fmt.Errorf("len of type %s", v.Type())
}

// Function invocation

// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
    v := reflect.ValueOf(fn)
    typ := v.Type()
    if typ.Kind() != reflect.Func {
        return nil, fmt.Errorf("non-function of type %s", typ)
    }
    if !goodFunc(typ) {
        return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
    }
    numIn := typ.NumIn()
    var dddType reflect.Type
    if typ.IsVariadic() {
        if len(args) < numIn-1 {
            return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
        }
        dddType = typ.In(numIn - 1).Elem()
    } else {
        if len(args) != numIn {
            return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
        }
    }
    argv := make([]reflect.Value, len(args))
    for i, arg := range args {
        value := reflect.ValueOf(arg)
        // Compute the expected type. Clumsy because of variadics.
        var argType reflect.Type
        if !typ.IsVariadic() || i < numIn-1 {
            argType = typ.In(i)
        } else {
            argType = dddType
        }
        if !value.IsValid() && canBeNil(argType) {
            value = reflect.Zero(argType)
        }
        if !value.Type().AssignableTo(argType) {
            return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
        }
        argv[i] = value
    }
    result := v.Call(argv)
    if len(result) == 2 && !result[1].IsNil() {
        return result[0].Interface(), result[1].Interface().(error)
    }
    return result[0].Interface(), nil
}

// Boolean logic.

func truth(a interface{}) bool {
    t, _ := isTrue(reflect.ValueOf(a))
    return t
}

// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
    if !truth(arg0) {
        return arg0
    }
    for i := range args {
        arg0 = args[i]
        if !truth(arg0) {
            break
        }
    }
    return arg0
}

// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
    if truth(arg0) {
        return arg0
    }
    for i := range args {
        arg0 = args[i]
        if truth(arg0) {
            break
        }
    }
    return arg0
}

// not returns the Boolean negation of its argument.
func not(arg interface{}) (truth bool) {
    truth, _ = isTrue(reflect.ValueOf(arg))
    return !truth
}

// Comparison.

// TODO: Perhaps allow comparison between signed and unsigned integers.

var (
    errBadComparisonType = errors.New("invalid type for comparison")
    errBadComparison     = errors.New("incompatible types for comparison")
    errNoComparison      = errors.New("missing argument for comparison")
)

type kind int

const (
    invalidKind kind = iota
    boolKind
    complexKind
    intKind
    floatKind
    integerKind
    stringKind
    uintKind
)

func basicKind(v reflect.Value) (kind, error) {
    switch v.Kind() {
    case reflect.Bool:
        return boolKind, nil
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return intKind, nil
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return uintKind, nil
    case reflect.Float32, reflect.Float64:
        return floatKind, nil
    case reflect.Complex64, reflect.Complex128:
        return complexKind, nil
    case reflect.String:
        return stringKind, nil
    }
    return invalidKind, errBadComparisonType
}

// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
    v1 := reflect.ValueOf(arg1)
    k1, err := basicKind(v1)
    if err != nil {
        return false, err
    }
    if len(arg2) == 0 {
        return false, errNoComparison
    }
    for _, arg := range arg2 {
        v2 := reflect.ValueOf(arg)
        k2, err := basicKind(v2)
        if err != nil {
            return false, err
        }
        truth := false
        if k1 != k2 {
            // Special case: Can compare integer values regardless of type's sign.
            switch {
            case k1 == intKind && k2 == uintKind:
                truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
            case k1 == uintKind && k2 == intKind:
                truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
            default:
                return false, errBadComparison
            }
        } else {
            switch k1 {
            case boolKind:
                truth = v1.Bool() == v2.Bool()
            case complexKind:
                truth = v1.Complex() == v2.Complex()
            case floatKind:
                truth = v1.Float() == v2.Float()
            case intKind:
                truth = v1.Int() == v2.Int()
            case stringKind:
                truth = v1.String() == v2.String()
            case uintKind:
                truth = v1.Uint() == v2.Uint()
            default:
                panic("invalid kind")
            }
        }
        if truth {
            return true, nil
        }
    }
    return false, nil
}

// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
    // != is the inverse of ==.
    equal, err := eq(arg1, arg2)
    return !equal, err
}

// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
    v1 := reflect.ValueOf(arg1)
    k1, err := basicKind(v1)
    if err != nil {
        return false, err
    }
    v2 := reflect.ValueOf(arg2)
    k2, err := basicKind(v2)
    if err != nil {
        return false, err
    }
    truth := false
    if k1 != k2 {
        // Special case: Can compare integer values regardless of type's sign.
        switch {
        case k1 == intKind && k2 == uintKind:
            truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
        case k1 == uintKind && k2 == intKind:
            truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
        default:
            return false, errBadComparison
        }
    } else {
        switch k1 {
        case boolKind, complexKind:
            return false, errBadComparisonType
        case floatKind:
            truth = v1.Float() < v2.Float()
        case intKind:
            truth = v1.Int() < v2.Int()
        case stringKind:
            truth = v1.String() < v2.String()
        case uintKind:
            truth = v1.Uint() < v2.Uint()
        default:
            panic("invalid kind")
        }
    }
    return truth, nil
}

// le evaluates the comparison <= b.
func le(arg1, arg2 interface{}) (bool, error) {
    // <= is < or ==.
    lessThan, err := lt(arg1, arg2)
    if lessThan || err != nil {
        return lessThan, err
    }
    return eq(arg1, arg2)
}

// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
    // > is the inverse of <=.
    lessOrEqual, err := le(arg1, arg2)
    if err != nil {
        return false, err
    }
    return !lessOrEqual, nil
}

// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
    // >= is the inverse of <.
    lessThan, err := lt(arg1, arg2)
    if err != nil {
        return false, err
    }
    return !lessThan, nil
}

// HTML escaping.

var (
    htmlQuot = []byte("&#34;") // shorter than "&quot;"
    htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
    htmlAmp  = []byte("&amp;")
    htmlLt   = []byte("&lt;")
    htmlGt   = []byte("&gt;")
)

// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
    last := 0
    for i, c := range b {
        var html []byte
        switch c {
        case '"':
            html = htmlQuot
        case '\'':
            html = htmlApos
        case '&':
            html = htmlAmp
        case '<':
            html = htmlLt
        case '>':
            html = htmlGt
        default:
            continue
        }
        w.Write(b[last:i])
        w.Write(html)
        last = i + 1
    }
    w.Write(b[last:])
}

// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
    // Avoid allocation if we can.
    if strings.IndexAny(s, `'"&<>`) < 0 {
        return s
    }
    var b bytes.Buffer
    HTMLEscape(&b, []byte(s))
    return b.String()
}

// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
    return HTMLEscapeString(evalArgs(args))
}

// JavaScript escaping.

var (
    jsLowUni = []byte(`\u00`)
    hex      = []byte("0123456789ABCDEF")

    jsBackslash = []byte(`\\`)
    jsApos      = []byte(`\'`)
    jsQuot      = []byte(`\"`)
    jsLt        = []byte(`\x3C`)
    jsGt        = []byte(`\x3E`)
)

// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
    last := 0
    for i := 0; i < len(b); i++ {
        c := b[i]

        if !jsIsSpecial(rune(c)) {
            // fast path: nothing to do
            continue
        }
        w.Write(b[last:i])

        if c < utf8.RuneSelf {
            // Quotes, slashes and angle brackets get quoted.
            // Control characters get written as \u00XX.
            switch c {
            case '\\':
                w.Write(jsBackslash)
            case '\'':
                w.Write(jsApos)
            case '"':
                w.Write(jsQuot)
            case '<':
                w.Write(jsLt)
            case '>':
                w.Write(jsGt)
            default:
                w.Write(jsLowUni)
                t, b := c>>4, c&0x0f
                w.Write(hex[t : t+1])
                w.Write(hex[b : b+1])
            }
        } else {
            // Unicode rune.
            r, size := utf8.DecodeRune(b[i:])
            if unicode.IsPrint(r) {
                w.Write(b[i : i+size])
            } else {
                fmt.Fprintf(w, "\\u%04X", r)
            }
            i += size - 1
        }
        last = i + 1
    }
    w.Write(b[last:])
}

// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
    // Avoid allocation if we can.
    if strings.IndexFunc(s, jsIsSpecial) < 0 {
        return s
    }
    var b bytes.Buffer
    JSEscape(&b, []byte(s))
    return b.String()
}

func jsIsSpecial(r rune) bool {
    switch r {
    case '\\', '\'', '"', '<', '>':
        return true
    }
    return r < ' ' || utf8.RuneSelf <= r
}

// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
    return JSEscapeString(evalArgs(args))
}

// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
    return url.QueryEscape(evalArgs(args))
}

// evalArgs formats the list of arguments into a string. It is therefore equivalent to
//	fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
    ok := false
    var s string
    // Fast path for simple common case.
    if len(args) == 1 {
        s, ok = args[0].(string)
    }
    if !ok {
        for i, arg := range args {
            a, ok := printableValue(reflect.ValueOf(arg))
            if ok {
                args[i] = a
            } // else let fmt do its thing
        }
        s = fmt.Sprint(args...)
    }
    return s
}
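Note (added for illustration, not part of the commit): a sketch of the exported comparison helpers and the HTML escaper from the forked template package above. The import path is an assumption for the example.

package main

import (
    "fmt"

    "k8s.io/client-go/third_party/forked/golang/template" // assumed path of the fork
)

func main() {
    // The exported aliases wrap the template comparison builtins (eq, lt, ...).
    eq, _ := template.Equal(2, 2)    // true
    lt, _ := template.Less("a", "b") // true
    fmt.Println(eq, lt)

    // HTMLEscapeString replaces the five HTML-significant characters with entities.
    fmt.Println(template.HTMLEscapeString(`<b>"hi" & 'bye'</b>`))
    // &lt;b&gt;&#34;hi&#34; &amp; &#39;bye&#39;&lt;/b&gt;
}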
@@ -30,9 +30,9 @@ import (

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/third_party/forked/golang/netutil"
    "k8s.io/client-go/pkg/api"
    "k8s.io/client-go/pkg/util/httpstream"
    "k8s.io/client-go/third_party/forked/golang/netutil"
)

// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports

File diff suppressed because it is too large
@@ -1,79 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

// A simple trie implementation with Add and HasPrefix methods only.
type Trie struct {
    children map[byte]*Trie
    wordTail bool
    word     string
}

// CreateTrie creates a Trie and adds all strings in the provided list to it.
func CreateTrie(list []string) Trie {
    ret := Trie{
        children: make(map[byte]*Trie),
        wordTail: false,
    }
    for _, v := range list {
        ret.Add(v)
    }
    return ret
}

// Add adds a word to this trie
func (t *Trie) Add(v string) {
    root := t
    for _, b := range []byte(v) {
        child, exists := root.children[b]
        if !exists {
            child = &Trie{
                children: make(map[byte]*Trie),
                wordTail: false,
            }
            root.children[b] = child
        }
        root = child
    }
    root.wordTail = true
    root.word = v
}

// HasPrefix returns true if v has any of the prefixes stored in this trie.
func (t *Trie) HasPrefix(v string) bool {
    _, has := t.GetPrefix(v)
    return has
}

// GetPrefix is like HasPrefix but returns the prefix in case of match or empty string otherwise.
func (t *Trie) GetPrefix(v string) (string, bool) {
    root := t
    if root.wordTail {
        return root.word, true
    }
    for _, b := range []byte(v) {
        child, exists := root.children[b]
        if !exists {
            return "", false
        }
        if child.wordTail {
            return child.word, true
        }
        root = child
    }
    return "", false
}
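Note (added for illustration, not part of the commit): building a prefix trie from the deleted helper above and querying it. The import path is a guess; the diff does not show where package util lived, so treat it as a placeholder.

package main

import (
    "fmt"

    "k8s.io/client-go/pkg/util" // hypothetical location of the Trie helper
)

func main() {
    t := util.CreateTrie([]string{"metadata.", "spec."})
    fmt.Println(t.HasPrefix("metadata.name"))   // true
    fmt.Println(t.GetPrefix("spec.containers")) // spec. true
    fmt.Println(t.HasPrefix("status.phase"))    // false, no stored prefix matches
}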
@@ -1,211 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
    "math"
    "sync"
    "time"

    "github.com/juju/ratelimit"
)

type RateLimiter interface {
    // When gets an item and gets to decide how long that item should wait
    When(item interface{}) time.Duration
    // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing
    // or for success, we'll stop tracking it
    Forget(item interface{})
    // NumRequeues returns how many failures the item has had
    NumRequeues(item interface{}) int
}

// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
    return NewMaxOfRateLimiter(
        NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
        // 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item)
        &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
    )
}

// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
type BucketRateLimiter struct {
    *ratelimit.Bucket
}

var _ RateLimiter = &BucketRateLimiter{}

func (r *BucketRateLimiter) When(item interface{}) time.Duration {
    return r.Bucket.Take(1)
}

func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
    return 0
}

func (r *BucketRateLimiter) Forget(item interface{}) {
}

// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
// dealing with max failures and expiration is up to the caller
type ItemExponentialFailureRateLimiter struct {
    failuresLock sync.Mutex
    failures     map[interface{}]int

    baseDelay time.Duration
    maxDelay  time.Duration
}

var _ RateLimiter = &ItemExponentialFailureRateLimiter{}

func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
    return &ItemExponentialFailureRateLimiter{
        failures:  map[interface{}]int{},
        baseDelay: baseDelay,
        maxDelay:  maxDelay,
    }
}

func DefaultItemBasedRateLimiter() RateLimiter {
    return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
}

func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    exp := r.failures[item]
    r.failures[item] = r.failures[item] + 1

    // The backoff is capped such that 'calculated' value never overflows.
    backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
    if backoff > math.MaxInt64 {
        return r.maxDelay
    }

    calculated := time.Duration(backoff)
    if calculated > r.maxDelay {
        return r.maxDelay
    }

    return calculated
}

func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    return r.failures[item]
}

func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    delete(r.failures, item)
}

// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type ItemFastSlowRateLimiter struct {
    failuresLock sync.Mutex
    failures     map[interface{}]int

    maxFastAttempts int
    fastDelay       time.Duration
    slowDelay       time.Duration
}

var _ RateLimiter = &ItemFastSlowRateLimiter{}

func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
    return &ItemFastSlowRateLimiter{
        failures:        map[interface{}]int{},
        fastDelay:       fastDelay,
        slowDelay:       slowDelay,
        maxFastAttempts: maxFastAttempts,
    }
}

func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    r.failures[item] = r.failures[item] + 1

    if r.failures[item] <= r.maxFastAttempts {
        return r.fastDelay
    }

    return r.slowDelay
}

func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    return r.failures[item]
}

func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
    r.failuresLock.Lock()
    defer r.failuresLock.Unlock()

    delete(r.failures, item)
}

// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type MaxOfRateLimiter struct {
    limiters []RateLimiter
}

func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
    ret := time.Duration(0)
    for _, limiter := range r.limiters {
        curr := limiter.When(item)
        if curr > ret {
            ret = curr
        }
    }

    return ret
}

func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
    return &MaxOfRateLimiter{limiters: limiters}
}

func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
    ret := 0
    for _, limiter := range r.limiters {
        curr := limiter.NumRequeues(item)
        if curr > ret {
            ret = curr
        }
    }

    return ret
}

func (r *MaxOfRateLimiter) Forget(item interface{}) {
    for _, limiter := range r.limiters {
        limiter.Forget(item)
    }
}
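Note (added for illustration, not part of the commit): how a controller typically consumes these rate limiters. The import path is an assumption; only the limiter constructors above are shown in this diff.

package main

import (
    "fmt"

    "k8s.io/client-go/util/workqueue" // assumed import path for the workqueue package
)

func main() {
    limiter := workqueue.DefaultControllerRateLimiter()

    // Repeated failures of the same item back off exponentially (5ms, 10ms, 20ms, ...),
    // while the shared token bucket caps the overall retry rate at 10 qps with a burst of 100.
    for i := 0; i < 3; i++ {
        fmt.Printf("retry %d of %q after %v\n", i+1, "pods/default/nginx", limiter.When("pods/default/nginx"))
    }

    fmt.Println("failures so far:", limiter.NumRequeues("pods/default/nginx"))
    limiter.Forget("pods/default/nginx") // a successful sync resets the per-item backoff
}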
@@ -1,184 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue

import (
	"testing"
	"time"
)

func TestItemExponentialFailureRateLimiter(t *testing.T) {
	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)

	if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 4*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 8*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 16*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	if e, a := 1*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2, limiter.NumRequeues("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	limiter.Forget("one")
	if e, a := 0, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}

func TestItemExponentialFailureRateLimiterOverFlow(t *testing.T) {
	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1000*time.Second)
	for i := 0; i < 5; i++ {
		limiter.When("one")
	}
	if e, a := 32*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	for i := 0; i < 1000; i++ {
		limiter.When("overflow1")
	}
	if e, a := 1000*time.Second, limiter.When("overflow1"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	limiter = NewItemExponentialFailureRateLimiter(1*time.Minute, 1000*time.Hour)
	for i := 0; i < 2; i++ {
		limiter.When("two")
	}
	if e, a := 4*time.Minute, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	for i := 0; i < 1000; i++ {
		limiter.When("overflow2")
	}
	if e, a := 1000*time.Hour, limiter.When("overflow2"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}

func TestItemFastSlowRateLimiter(t *testing.T) {
	limiter := NewItemFastSlowRateLimiter(5*time.Millisecond, 10*time.Second, 3)

	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 10*time.Second, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 10*time.Second, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2, limiter.NumRequeues("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	limiter.Forget("one")
	if e, a := 0, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}

func TestMaxOfRateLimiter(t *testing.T) {
	limiter := NewMaxOfRateLimiter(
		NewItemFastSlowRateLimiter(5*time.Millisecond, 3*time.Second, 3),
		NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second),
	)

	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 3*time.Second, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 3*time.Second, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2, limiter.NumRequeues("two"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	limiter.Forget("one")
	if e, a := 0, limiter.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}
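The tests above pin down the limiter semantics: NewItemExponentialFailureRateLimiter doubles the per-item delay on each When call up to the configured cap, NewItemFastSlowRateLimiter switches from the fast to the slow delay after the given number of fast attempts, and NewMaxOfRateLimiter returns the largest answer of its children. A minimal usage sketch outside the test suite, assuming the import path used elsewhere in this commit (k8s.io/client-go/pkg/util/workqueue):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	// Per-item exponential backoff: 1ms, 2ms, 4ms, ... capped at 1s.
	limiter := workqueue.NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
	for i := 0; i < 4; i++ {
		fmt.Println(limiter.When("item")) // 1ms, 2ms, 4ms, 8ms
	}
	fmt.Println(limiter.NumRequeues("item")) // 4
	limiter.Forget("item")                   // resets the backoff for "item"
	fmt.Println(limiter.When("item"))        // back to 1ms
}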
@@ -1,246 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sort"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/clock"
)

// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
	Interface
	// AddAfter adds an item to the workqueue after the indicated duration has passed
	AddAfter(item interface{}, duration time.Duration)
}

// NewDelayingQueue constructs a new workqueue with delayed queuing ability
func NewDelayingQueue() DelayingInterface {
	return newDelayingQueue(clock.RealClock{}, "")
}

func NewNamedDelayingQueue(name string) DelayingInterface {
	return newDelayingQueue(clock.RealClock{}, name)
}

func newDelayingQueue(clock clock.Clock, name string) DelayingInterface {
	ret := &delayingType{
		Interface:          NewNamed(name),
		clock:              clock,
		heartbeat:          clock.Tick(maxWait),
		stopCh:             make(chan struct{}),
		waitingTimeByEntry: map[t]time.Time{},
		waitingForAddCh:    make(chan waitFor, 1000),
		metrics:            newRetryMetrics(name),
	}

	go ret.waitingLoop()

	return ret
}

// delayingType wraps an Interface and provides delayed re-enquing
type delayingType struct {
	Interface

	// clock tracks time for delayed firing
	clock clock.Clock

	// stopCh lets us signal a shutdown to the waiting loop
	stopCh chan struct{}

	// heartbeat ensures we wait no more than maxWait before firing
	//
	// TODO: replace with Ticker (and add to clock) so this can be cleaned up.
	// clock.Tick will leak.
	heartbeat <-chan time.Time

	// waitingForAdd is an ordered slice of items to be added to the contained work queue
	waitingForAdd []waitFor
	// waitingTimeByEntry holds wait time by entry, so we can lookup pre-existing indexes
	waitingTimeByEntry map[t]time.Time
	// waitingForAddCh is a buffered channel that feeds waitingForAdd
	waitingForAddCh chan waitFor

	// metrics counts the number of retries
	metrics retryMetrics
}

// waitFor holds the data to add and the time it should be added
type waitFor struct {
	data    t
	readyAt time.Time
}

// ShutDown gives a way to shut off this queue
func (q *delayingType) ShutDown() {
	q.Interface.ShutDown()
	close(q.stopCh)
}

// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
	// don't add if we're already shutting down
	if q.ShuttingDown() {
		return
	}

	q.metrics.retry()

	// immediately add things with no delay
	if duration <= 0 {
		q.Add(item)
		return
	}

	select {
	case <-q.stopCh:
		// unblock if ShutDown() is called
	case q.waitingForAddCh <- waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
	}
}

// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second

// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
	defer utilruntime.HandleCrash()

	// Make a placeholder channel to use when there are no items in our list
	never := make(<-chan time.Time)

	for {
		if q.Interface.ShuttingDown() {
			// discard waiting entries
			q.waitingForAdd = nil
			q.waitingTimeByEntry = nil
			return
		}

		now := q.clock.Now()

		// Add ready entries
		readyEntries := 0
		for _, entry := range q.waitingForAdd {
			if entry.readyAt.After(now) {
				break
			}
			q.Add(entry.data)
			delete(q.waitingTimeByEntry, entry.data)
			readyEntries++
		}
		q.waitingForAdd = q.waitingForAdd[readyEntries:]

		// Set up a wait for the first item's readyAt (if one exists)
		nextReadyAt := never
		if len(q.waitingForAdd) > 0 {
			nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now))
		}

		select {
		case <-q.stopCh:
			return

		case <-q.heartbeat:
			// continue the loop, which will add ready items

		case <-nextReadyAt:
			// continue the loop, which will add ready items

		case waitEntry := <-q.waitingForAddCh:
			if waitEntry.readyAt.After(q.clock.Now()) {
				q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry)
			} else {
				q.Add(waitEntry.data)
			}

			drained := false
			for !drained {
				select {
				case waitEntry := <-q.waitingForAddCh:
					if waitEntry.readyAt.After(q.clock.Now()) {
						q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry)
					} else {
						q.Add(waitEntry.data)
					}
				default:
					drained = true
				}
			}
		}
	}
}

// inserts the given entry into the sorted entries list
// same semantics as append()... the given slice may be modified,
// and the returned value should be used
//
// TODO: This should probably be converted to use container/heap to improve
// running time for a large number of items.
func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor {
	// if the entry is already in our retry list and the existing time is before the new one, just skip it
	existingTime, exists := knownEntries[entry.data]
	if exists && existingTime.Before(entry.readyAt) {
		return entries
	}

	// if the entry exists and is scheduled for later, go ahead and remove the entry
	if exists {
		if existingIndex := findEntryIndex(entries, existingTime, entry.data); existingIndex >= 0 && existingIndex < len(entries) {
			entries = append(entries[:existingIndex], entries[existingIndex+1:]...)
		}
	}

	insertionIndex := sort.Search(len(entries), func(i int) bool {
		return entry.readyAt.Before(entries[i].readyAt)
	})

	// grow by 1
	entries = append(entries, waitFor{})
	// shift items from the insertion point to the end
	copy(entries[insertionIndex+1:], entries[insertionIndex:])
	// insert the record
	entries[insertionIndex] = entry

	knownEntries[entry.data] = entry.readyAt

	return entries
}

// findEntryIndex returns the index for an existing entry
func findEntryIndex(entries []waitFor, existingTime time.Time, data t) int {
	index := sort.Search(len(entries), func(i int) bool {
		return entries[i].readyAt.After(existingTime) || existingTime == entries[i].readyAt
	})

	// we know this is the earliest possible index, but there could be multiple with the same time
	// iterate from here to find the dupe
	for ; index < len(entries); index++ {
		if entries[index].data == data {
			break
		}
	}

	return index
}
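AddAfter hands a waitFor entry to waitingForAddCh, and waitingLoop keeps waitingForAdd sorted by readyAt, pushing entries into the wrapped queue once their deadline passes. A short caller-side sketch, assuming the same import path as the tests in this commit; the sleep is only illustrative, since Get would block until the item fires anyway:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	q.AddAfter("retry-me", 100*time.Millisecond) // not visible to Get yet
	fmt.Println(q.Len())                         // 0: the item is still waiting

	time.Sleep(150 * time.Millisecond) // real clock here; the tests inject a fake clock instead
	item, shutdown := q.Get()
	fmt.Println(item, shutdown) // "retry-me" false
	q.Done(item)
}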
@@ -1,236 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/clock"
)

func TestSimpleQueue(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock, "")

	first := "foo"

	q.AddAfter(first, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(60 * time.Millisecond)

	if err := waitForAdded(q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ := q.Get()
	q.Done(item)

	// step past the next heartbeat
	fakeClock.Step(10 * time.Second)

	err := wait.Poll(1*time.Millisecond, 30*time.Millisecond, func() (done bool, err error) {
		if q.Len() > 0 {
			return false, fmt.Errorf("added to queue")
		}

		return false, nil
	})
	if err != wait.ErrWaitTimeout {
		t.Errorf("expected timeout, got: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}
}

func TestDeduping(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock, "")

	first := "foo"

	q.AddAfter(first, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	q.AddAfter(first, 70*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	// step past the first block, we should receive now
	fakeClock.Step(60 * time.Millisecond)
	if err := waitForAdded(q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ := q.Get()
	q.Done(item)

	// step past the second add
	fakeClock.Step(20 * time.Millisecond)
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	// test again, but this time the earlier should override
	q.AddAfter(first, 50*time.Millisecond)
	q.AddAfter(first, 30*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(40 * time.Millisecond)
	if err := waitForAdded(q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ = q.Get()
	q.Done(item)

	// step past the second add
	fakeClock.Step(20 * time.Millisecond)
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}
}

func TestAddTwoFireEarly(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock, "")

	first := "foo"
	second := "bar"
	third := "baz"

	q.AddAfter(first, 1*time.Second)
	q.AddAfter(second, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(60 * time.Millisecond)

	if err := waitForAdded(q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ := q.Get()
	if !reflect.DeepEqual(item, second) {
		t.Errorf("expected %v, got %v", second, item)
	}

	q.AddAfter(third, 2*time.Second)

	fakeClock.Step(1 * time.Second)
	if err := waitForAdded(q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ = q.Get()
	if !reflect.DeepEqual(item, first) {
		t.Errorf("expected %v, got %v", first, item)
	}

	fakeClock.Step(2 * time.Second)
	if err := waitForAdded(q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ = q.Get()
	if !reflect.DeepEqual(item, third) {
		t.Errorf("expected %v, got %v", third, item)
	}

}

func TestCopyShifting(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock, "")

	first := "foo"
	second := "bar"
	third := "baz"

	q.AddAfter(first, 1*time.Second)
	q.AddAfter(second, 500*time.Millisecond)
	q.AddAfter(third, 250*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(2 * time.Second)

	if err := waitForAdded(q, 3); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	actualFirst, _ := q.Get()
	if !reflect.DeepEqual(actualFirst, third) {
		t.Errorf("expected %v, got %v", third, actualFirst)
	}
	actualSecond, _ := q.Get()
	if !reflect.DeepEqual(actualSecond, second) {
		t.Errorf("expected %v, got %v", second, actualSecond)
	}
	actualThird, _ := q.Get()
	if !reflect.DeepEqual(actualThird, first) {
		t.Errorf("expected %v, got %v", first, actualThird)
	}
}

func waitForAdded(q DelayingInterface, depth int) error {
	return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		if q.Len() == depth {
			return true, nil
		}

		return false, nil
	})
}

func waitForWaitingQueueToFill(q DelayingInterface) error {
	return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		if len(q.(*delayingType).waitingForAddCh) == 0 {
			return true, nil
		}

		return false, nil
	})
}
@@ -1,26 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package workqueue provides a simple queue that supports the following
// features:
//  * Fair: items processed in the order in which they are added.
//  * Stingy: a single item will not be processed multiple times concurrently,
//      and if an item is added multiple times before it can be processed, it
//      will only be processed once.
//  * Multiple consumers and producers. In particular, it is allowed for an
//      item to be reenqueued while it is being processed.
//  * Shutdown notifications.
package workqueue
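A minimal sketch of the Fair/Stingy behaviour described above, assuming the import path used elsewhere in this commit; re-adding an item that is already queued does not grow the queue:

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	q := workqueue.New()

	// "Stingy": adding the same item twice before it is picked up queues it once.
	q.Add("node-1")
	q.Add("node-1")
	fmt.Println(q.Len()) // 1

	item, _ := q.Get()
	q.Done(item)

	q.ShutDown()
	_, shutdown := q.Get() // returns immediately with shutdown == true
	fmt.Println(shutdown)
}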
@@ -1,195 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sync"
	"time"
)

// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

type queueMetrics interface {
	add(item t)
	get(item t)
	done(item t)
}

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
	Inc()
	Dec()
}

// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
	Inc()
}

// SummaryMetric captures individual observations.
type SummaryMetric interface {
	Observe(float64)
}

type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Observe(float64) {}

type defaultQueueMetrics struct {
	// current depth of a workqueue
	depth GaugeMetric
	// total number of adds handled by a workqueue
	adds CounterMetric
	// how long an item stays in a workqueue
	latency SummaryMetric
	// how long processing an item from a workqueue takes
	workDuration         SummaryMetric
	addTimes             map[t]time.Time
	processingStartTimes map[t]time.Time
}

func (m *defaultQueueMetrics) add(item t) {
	if m == nil {
		return
	}

	m.adds.Inc()
	m.depth.Inc()
	if _, exists := m.addTimes[item]; !exists {
		m.addTimes[item] = time.Now()
	}
}

func (m *defaultQueueMetrics) get(item t) {
	if m == nil {
		return
	}

	m.depth.Dec()
	m.processingStartTimes[item] = time.Now()
	if startTime, exists := m.addTimes[item]; exists {
		m.latency.Observe(sinceInMicroseconds(startTime))
		delete(m.addTimes, item)
	}
}

func (m *defaultQueueMetrics) done(item t) {
	if m == nil {
		return
	}

	if startTime, exists := m.processingStartTimes[item]; exists {
		m.workDuration.Observe(sinceInMicroseconds(startTime))
		delete(m.processingStartTimes, item)
	}
}

// Gets the time since the specified start in microseconds.
func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

type retryMetrics interface {
	retry()
}

type defaultRetryMetrics struct {
	retries CounterMetric
}

func (m *defaultRetryMetrics) retry() {
	if m == nil {
		return
	}

	m.retries.Inc()
}

// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
	NewDepthMetric(name string) GaugeMetric
	NewAddsMetric(name string) CounterMetric
	NewLatencyMetric(name string) SummaryMetric
	NewWorkDurationMetric(name string) SummaryMetric
	NewRetriesMetric(name string) CounterMetric
}

type noopMetricsProvider struct{}

func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
	return noopMetric{}
}

func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
	return noopMetric{}
}

func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric {
	return noopMetric{}
}

func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric {
	return noopMetric{}
}

func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
	return noopMetric{}
}

var metricsFactory = struct {
	metricsProvider MetricsProvider
	setProviders    sync.Once
}{
	metricsProvider: noopMetricsProvider{},
}

func newQueueMetrics(name string) queueMetrics {
	var ret *defaultQueueMetrics
	if len(name) == 0 {
		return ret
	}
	return &defaultQueueMetrics{
		depth:                metricsFactory.metricsProvider.NewDepthMetric(name),
		adds:                 metricsFactory.metricsProvider.NewAddsMetric(name),
		latency:              metricsFactory.metricsProvider.NewLatencyMetric(name),
		workDuration:         metricsFactory.metricsProvider.NewWorkDurationMetric(name),
		addTimes:             map[t]time.Time{},
		processingStartTimes: map[t]time.Time{},
	}
}

func newRetryMetrics(name string) retryMetrics {
	var ret *defaultRetryMetrics
	if len(name) == 0 {
		return ret
	}
	return &defaultRetryMetrics{
		retries: metricsFactory.metricsProvider.NewRetriesMetric(name),
	}
}

// SetProvider sets the metrics provider of the metricsFactory.
func SetProvider(metricsProvider MetricsProvider) {
	metricsFactory.setProviders.Do(func() {
		metricsFactory.metricsProvider = metricsProvider
	})
}
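SetProvider is a set-once hook: the first caller wins because of the sync.Once, and named queues built afterwards draw their depth, adds, latency, work-duration and retries metrics from it. A sketch of a trivial provider, assuming the same import path as the tests in this commit; the log-backed metric type is purely illustrative:

package main

import (
	"log"
	"sync/atomic"

	"k8s.io/client-go/pkg/util/workqueue"
)

// logMetric satisfies GaugeMetric, CounterMetric and SummaryMetric with an atomic counter.
type logMetric struct {
	name string
	n    int64
}

func (m *logMetric) Inc()              { atomic.AddInt64(&m.n, 1) }
func (m *logMetric) Dec()              { atomic.AddInt64(&m.n, -1) }
func (m *logMetric) Observe(v float64) { log.Printf("%s: observed %v", m.name, v) }

type logProvider struct{}

func (logProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
	return &logMetric{name: name + "_depth"}
}
func (logProvider) NewAddsMetric(name string) workqueue.CounterMetric {
	return &logMetric{name: name + "_adds"}
}
func (logProvider) NewLatencyMetric(name string) workqueue.SummaryMetric {
	return &logMetric{name: name + "_latency"}
}
func (logProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric {
	return &logMetric{name: name + "_work"}
}
func (logProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
	return &logMetric{name: name + "_retries"}
}

func main() {
	workqueue.SetProvider(logProvider{}) // register before constructing any named queues
	q := workqueue.NewNamed("example")   // unnamed queues skip metrics entirely
	q.Add("x")
	q.ShutDown()
}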
@@ -1,52 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sync"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

type DoWorkPieceFunc func(piece int)

// Parallelize is a very simple framework that allow for parallelizing
// N independent pieces of work.
func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	if pieces < workers {
		workers = pieces
	}

	wg := sync.WaitGroup{}
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer utilruntime.HandleCrash()
			defer wg.Done()
			for piece := range toProcess {
				doWorkPiece(piece)
			}
		}()
	}
	wg.Wait()
}
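Parallelize fans the piece indexes out over at most `workers` goroutines and blocks until every piece has been handled. A small usage sketch, assuming the import path used elsewhere in this commit:

package main

import (
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	var sum int64
	// Square 100 pieces of work using at most 8 goroutines; Parallelize blocks until done.
	workqueue.Parallelize(8, 100, func(piece int) {
		atomic.AddInt64(&sum, int64(piece*piece))
	})
	fmt.Println(sum) // 328350
}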
@@ -1,172 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sync"
)

type Interface interface {
	Add(item interface{})
	Len() int
	Get() (item interface{}, shutdown bool)
	Done(item interface{})
	ShutDown()
	ShuttingDown() bool
}

// New constructs a new workqueue (see the package comment).
func New() *Type {
	return NewNamed("")
}

func NewNamed(name string) *Type {
	return &Type{
		dirty:      set{},
		processing: set{},
		cond:       sync.NewCond(&sync.Mutex{}),
		metrics:    newQueueMetrics(name),
	}
}

// Type is a work queue (see the package comment).
type Type struct {
	// queue defines the order in which we will work on items. Every
	// element of queue should be in the dirty set and not in the
	// processing set.
	queue []t

	// dirty defines all of the items that need to be processed.
	dirty set

	// Things that are currently being processed are in the processing set.
	// These things may be simultaneously in the dirty set. When we finish
	// processing something and remove it from this set, we'll check if
	// it's in the dirty set, and if so, add it to the queue.
	processing set

	cond *sync.Cond

	shuttingDown bool

	metrics queueMetrics
}

type empty struct{}
type t interface{}
type set map[t]empty

func (s set) has(item t) bool {
	_, exists := s[item]
	return exists
}

func (s set) insert(item t) {
	s[item] = empty{}
}

func (s set) delete(item t) {
	delete(s, item)
}

// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	if q.shuttingDown {
		return
	}
	if q.dirty.has(item) {
		return
	}

	q.metrics.add(item)

	q.dirty.insert(item)
	if q.processing.has(item) {
		return
	}

	q.queue = append(q.queue, item)
	q.cond.Signal()
}

// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value, that can't be synchronized properly.
func (q *Type) Len() int {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	return len(q.queue)
}

// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	for len(q.queue) == 0 && !q.shuttingDown {
		q.cond.Wait()
	}
	if len(q.queue) == 0 {
		// We must be shutting down.
		return nil, true
	}

	item, q.queue = q.queue[0], q.queue[1:]

	q.metrics.get(item)

	q.processing.insert(item)
	q.dirty.delete(item)

	return item, false
}

// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	q.metrics.done(item)

	q.processing.delete(item)
	if q.dirty.has(item) {
		q.queue = append(q.queue, item)
		q.cond.Signal()
	}
}

// ShutDown will cause q to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (q *Type) ShutDown() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.shuttingDown = true
	q.cond.Broadcast()
}

func (q *Type) ShuttingDown() bool {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	return q.shuttingDown
}
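The intended consumption pattern is a worker loop over Get and Done that exits when Get reports shutdown, mirroring the producer/consumer shape used by TestBasic below. A condensed sketch, assuming the import path used elsewhere in this commit:

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	q := workqueue.New()

	var wg sync.WaitGroup
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func(w int) {
			defer wg.Done()
			for {
				item, shutdown := q.Get()
				if shutdown {
					return // queue drained and ShutDown() called
				}
				fmt.Printf("worker %d: %v\n", w, item)
				q.Done(item) // must be called once processing finishes (see Done above)
			}
		}(w)
	}

	for i := 0; i < 10; i++ {
		q.Add(i)
	}
	q.ShutDown() // workers exit once the remaining items are drained
	wg.Wait()
}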
@@ -1,161 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue_test

import (
	"sync"
	"testing"
	"time"

	"k8s.io/client-go/pkg/util/workqueue"
)

func TestBasic(t *testing.T) {
	// If something is seriously wrong this test will never complete.
	q := workqueue.New()

	// Start producers
	const producers = 50
	producerWG := sync.WaitGroup{}
	producerWG.Add(producers)
	for i := 0; i < producers; i++ {
		go func(i int) {
			defer producerWG.Done()
			for j := 0; j < 50; j++ {
				q.Add(i)
				time.Sleep(time.Millisecond)
			}
		}(i)
	}

	// Start consumers
	const consumers = 10
	consumerWG := sync.WaitGroup{}
	consumerWG.Add(consumers)
	for i := 0; i < consumers; i++ {
		go func(i int) {
			defer consumerWG.Done()
			for {
				item, quit := q.Get()
				if item == "added after shutdown!" {
					t.Errorf("Got an item added after shutdown.")
				}
				if quit {
					return
				}
				t.Logf("Worker %v: begin processing %v", i, item)
				time.Sleep(3 * time.Millisecond)
				t.Logf("Worker %v: done processing %v", i, item)
				q.Done(item)
			}
		}(i)
	}

	producerWG.Wait()
	q.ShutDown()
	q.Add("added after shutdown!")
	consumerWG.Wait()
}

func TestAddWhileProcessing(t *testing.T) {
	q := workqueue.New()

	// Start producers
	const producers = 50
	producerWG := sync.WaitGroup{}
	producerWG.Add(producers)
	for i := 0; i < producers; i++ {
		go func(i int) {
			defer producerWG.Done()
			q.Add(i)
		}(i)
	}

	// Start consumers
	const consumers = 10
	consumerWG := sync.WaitGroup{}
	consumerWG.Add(consumers)
	for i := 0; i < consumers; i++ {
		go func(i int) {
			defer consumerWG.Done()
			// Every worker will re-add every item up to two times.
			// This tests the dirty-while-processing case.
			counters := map[interface{}]int{}
			for {
				item, quit := q.Get()
				if quit {
					return
				}
				counters[item]++
				if counters[item] < 2 {
					q.Add(item)
				}
				q.Done(item)
			}
		}(i)
	}

	producerWG.Wait()
	q.ShutDown()
	consumerWG.Wait()
}

func TestLen(t *testing.T) {
	q := workqueue.New()
	q.Add("foo")
	if e, a := 1, q.Len(); e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
	q.Add("bar")
	if e, a := 2, q.Len(); e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
	q.Add("foo") // should not increase the queue length.
	if e, a := 2, q.Len(); e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}

func TestReinsert(t *testing.T) {
	q := workqueue.New()
	q.Add("foo")

	// Start processing
	i, _ := q.Get()
	if i != "foo" {
		t.Errorf("Expected %v, got %v", "foo", i)
	}

	// Add it back while processing
	q.Add(i)

	// Finish it up
	q.Done(i)

	// It should be back on the queue
	i, _ = q.Get()
	if i != "foo" {
		t.Errorf("Expected %v, got %v", "foo", i)
	}

	// Finish that one up
	q.Done(i)

	if a := q.Len(); a != 0 {
		t.Errorf("Expected queue to be empty. Has %v items", a)
	}
}
@@ -1,69 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
	DelayingInterface

	// AddRateLimited adds an item to the workqueue after the rate limiter says its ok
	AddRateLimited(item interface{})

	// Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing
	// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
	// still have to call `Done` on the queue.
	Forget(item interface{})

	// NumRequeues returns back how many times the item was requeued
	NumRequeues(item interface{}) int
}

// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
	return &rateLimitingType{
		DelayingInterface: NewDelayingQueue(),
		rateLimiter:       rateLimiter,
	}
}

func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
	return &rateLimitingType{
		DelayingInterface: NewNamedDelayingQueue(name),
		rateLimiter:       rateLimiter,
	}
}

// rateLimitingType wraps an Interface and provides rateLimited re-enquing
type rateLimitingType struct {
	DelayingInterface

	rateLimiter RateLimiter
}

// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok
func (q *rateLimitingType) AddRateLimited(item interface{}) {
	q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}

func (q *rateLimitingType) NumRequeues(item interface{}) int {
	return q.rateLimiter.NumRequeues(item)
}

func (q *rateLimitingType) Forget(item interface{}) {
	q.rateLimiter.Forget(item)
}
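Since AddRateLimited is just AddAfter with a limiter-chosen delay, the usual controller loop is AddRateLimited on failure and Forget on success. A sketch under that assumption, reusing the limiter constructors from this package; handleItem is a hypothetical stand-in for real processing logic:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/workqueue"
)

// handleItem is a hypothetical worker function standing in for real reconciliation logic.
func handleItem(item interface{}) error {
	fmt.Println("processing", item)
	return nil
}

func main() {
	limiter := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second)
	q := workqueue.NewRateLimitingQueue(limiter)
	defer q.ShutDown()

	q.AddRateLimited("object-key")

	item, shutdown := q.Get()
	if shutdown {
		return
	}
	if err := handleItem(item); err != nil {
		q.AddRateLimited(item) // retry later with a growing delay
	} else {
		q.Forget(item) // success: stop tracking failures for this item
	}
	q.Done(item)
}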
@@ -1,75 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"testing"
	"time"

	"k8s.io/client-go/util/clock"
)

func TestRateLimitingQueue(t *testing.T) {
	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
	queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
	fakeClock := clock.NewFakeClock(time.Now())
	delayingQueue := &delayingType{
		Interface:       New(),
		clock:           fakeClock,
		heartbeat:       fakeClock.Tick(maxWait),
		stopCh:          make(chan struct{}),
		waitingForAddCh: make(chan waitFor, 1000),
		metrics:         newRetryMetrics(""),
	}
	queue.DelayingInterface = delayingQueue

	queue.AddRateLimited("one")
	waitEntry := <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("one")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2, queue.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	queue.AddRateLimited("two")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("two")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	queue.Forget("one")
	if e, a := 0, queue.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("one")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}
@@ -1,52 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import "time"

type TimedWorkQueue struct {
	*Type
}

type TimedWorkQueueItem struct {
	StartTime time.Time
	Object    interface{}
}

func NewTimedWorkQueue() *TimedWorkQueue {
	return &TimedWorkQueue{New()}
}

// Add adds the obj along with the current timestamp to the queue.
func (q TimedWorkQueue) Add(timedItem *TimedWorkQueueItem) {
	q.Type.Add(timedItem)
}

// Get gets the obj along with its timestamp from the queue.
func (q TimedWorkQueue) Get() (timedItem *TimedWorkQueueItem, shutdown bool) {
	origin, shutdown := q.Type.Get()
	if origin == nil {
		return nil, shutdown
	}
	timedItem, _ = origin.(*TimedWorkQueueItem)
	return timedItem, shutdown
}

func (q TimedWorkQueue) Done(timedItem *TimedWorkQueueItem) error {
	q.Type.Done(timedItem)
	return nil
}
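TimedWorkQueue is a thin typed wrapper over Type: callers pass *TimedWorkQueueItem values and the StartTime travels with the object. A minimal sketch, assuming the import path used elsewhere in this commit:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/workqueue"
)

func main() {
	q := workqueue.NewTimedWorkQueue()

	q.Add(&workqueue.TimedWorkQueueItem{StartTime: time.Now(), Object: "pod-a"})

	item, shutdown := q.Get()
	if shutdown || item == nil {
		return
	}
	fmt.Printf("%v has been queued since %v\n", item.Object, item.StartTime)
	q.Done(item)
}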
@@ -1,38 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"testing"
	"time"

	"k8s.io/client-go/pkg/api/v1"
)

func TestNoMemoryLeak(t *testing.T) {
	timedQueue := NewTimedWorkQueue()
	timedQueue.Add(&TimedWorkQueueItem{Object: &v1.Pod{}, StartTime: time.Time{}})
	item, _ := timedQueue.Get()
	timedQueue.Add(item)
	// The item should still be in the timedQueue.
	timedQueue.Done(item)
	item, _ = timedQueue.Get()
	timedQueue.Done(item)
	if len(timedQueue.Type.processing) != 0 {
		t.Errorf("expect timedQueue.Type.processing to be empty!")
	}
}