Mirror of https://github.com/kubernetes/client-go.git

Merge pull request #111254 from dims/update-to-golang-1.19-rc2

[golang] Update to 1.19rc2 (from 1.18.3)

Kubernetes-commit: 3ffdfbe286ebcea5d75617da6accaf67f815e0cf
Commit: cc879cd5b6

@@ -20,22 +20,22 @@ limitations under the License.
Package applyconfigurations provides typesafe go representations of the apply
configurations that are used to constructs Server-side Apply requests.

-Basics
+# Basics

The Apply functions in the typed client (see the k8s.io/client-go/kubernetes/typed packages) offer
a direct and typesafe way of calling Server-side Apply. Each Apply function takes an "apply
configuration" type as an argument, which is a structured representation of an Apply request. For
example:

	import (
		...
		v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"
	)
	hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns).
		WithSpec(v1ac.HorizontalPodAutoscalerSpec().
			WithMinReplicas(0)
		)
	return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true})

Note in this example that HorizontalPodAutoscaler is imported from an "applyconfigurations"
package. Each "apply configuration" type represents the same Kubernetes object kind as the

@@ -43,46 +43,46 @@ corresponding go struct, but where all fields are pointers to make them optional
requests to be accurately represented. For example, this when the apply configuration in the above
example is marshalled to YAML, it produces:

	apiVersion: autoscaling/v1
	kind: HorizontalPodAutoscaler
	metadata:
	    name: myHPA
	    namespace: myNamespace
	spec:
	    minReplicas: 0

To understand why this is needed, the above YAML cannot be produced by the
v1.HorizontalPodAutoscaler go struct. Take for example:

	hpa := v1.HorizontalPodAutoscaler{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "autoscaling/v1",
			Kind:       "HorizontalPodAutoscaler",
		},
		ObjectMeta: ObjectMeta{
			Namespace: ns,
			Name:      autoscalerName,
		},
		Spec: v1.HorizontalPodAutoscalerSpec{
			MinReplicas: pointer.Int32Ptr(0),
		},
	}

The above code attempts to declare the same apply configuration as shown in the previous examples,
but when marshalled to YAML, produces:

	kind: HorizontalPodAutoscaler
	apiVersion: autoscaling/v1
	metadata:
	  name: myHPA
	  namespace: myNamespace
	  creationTimestamp: null
	spec:
	  scaleTargetRef:
	    kind: ""
	    name: ""
	  minReplicas: 0
	  maxReplicas: 0

Which, among other things, contains spec.maxReplicas set to 0. This is almost certainly not what
the caller intended (the intended apply configuration says nothing about the maxReplicas field),

@@ -102,7 +102,7 @@ general purpose library. In addition to the convenience, the With functions also
developers from the underlying representation, which makes it safer for the underlying
representation to be changed to support additional features in the future.

-Controller Support
+# Controller Support

The new client-go support makes it much easier to use Server-side Apply in controllers, by either of
two mechanisms.

@@ -130,24 +130,24 @@ accidentally deleted. For such cases, an alternative to mechanism 1 is to replac
reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with a
"extract/modify-in-place/apply" workflow. Here's an example of the new workflow:

	fieldMgr := "my-field-manager"
	deploymentClient := clientset.AppsV1().Deployments("default")
	// read, could also be read from a shared informer
	deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{})
	if err != nil {
		// handle error
	}
	// extract
	deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr)
	if err != nil {
		// handle error
	}
	// modify-in-place
	deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container().
		WithName("modify-slice").
		WithImage("nginx:1.14.2"),
	)
	// apply
	applied, err := deploymentClient.Apply(ctx, extractedDeployment, metav1.ApplyOptions{FieldManager: fieldMgr})
*/
package applyconfigurations

go.mod
@@ -24,8 +24,8 @@ require (
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
	google.golang.org/protobuf v1.28.0
-	k8s.io/api v0.0.0-20220725160253-f6f0d0e54216
-	k8s.io/apimachinery v0.0.0-20220726200055-965218438260
+	k8s.io/api v0.0.0-20220727000259-04aced3612cf
+	k8s.io/apimachinery v0.0.0-20220727000102-7fb03423f864
	k8s.io/klog/v2 v2.70.1
	k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8
	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
@@ -61,6 +61,6 @@ require (
)

replace (
-	k8s.io/api => k8s.io/api v0.0.0-20220725160253-f6f0d0e54216
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20220726200055-965218438260
+	k8s.io/api => k8s.io/api v0.0.0-20220727000259-04aced3612cf
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20220727000102-7fb03423f864
)

go.sum
@@ -479,10 +479,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.0.0-20220725160253-f6f0d0e54216 h1:lK3Ih8q6XH8zLGPKkAx0CDs3WYWiQuK3NxL/W7LtqSw=
-k8s.io/api v0.0.0-20220725160253-f6f0d0e54216/go.mod h1:esLTNaRjCQz5pCDYxvulIzMUPDaw9bOuAQ9d6ndNdvs=
-k8s.io/apimachinery v0.0.0-20220726200055-965218438260 h1:Yec9pKZ5AYLiwe5G9piTsqNR7/V57T/9dL/ISMGZRQM=
-k8s.io/apimachinery v0.0.0-20220726200055-965218438260/go.mod h1:E6C2QnwpsJTXktTwPqd4nAJ/xmsdi1AHqDD0iPB61j0=
+k8s.io/api v0.0.0-20220727000259-04aced3612cf h1:kkCK4zgoE0+1Fsp4XsTu4rqZNhWOxvoiHBocRmvb+Qg=
+k8s.io/api v0.0.0-20220727000259-04aced3612cf/go.mod h1:FsQVUKy1P+2xAVDq48sxpYxRCSComqggD4SjWqnqoM4=
+k8s.io/apimachinery v0.0.0-20220727000102-7fb03423f864 h1:4PKVBjm5Lzfp9XRjEzRJHLhh0FGAUntjaxAkWBdSOyc=
+k8s.io/apimachinery v0.0.0-20220727000102-7fb03423f864/go.mod h1:E6C2QnwpsJTXktTwPqd4nAJ/xmsdi1AHqDD0iPB61j0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=

@@ -125,14 +125,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

@@ -125,14 +125,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

@@ -82,7 +82,8 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
// It returns the copy of the event that the server returns, or an error.
// The namespace and name of the target event is deduced from the event.
// The namespace must either match this event client's namespace, or this event client must
// have been created with the "" namespace.
//
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) {
	if e.ns != "" && event.Namespace != e.ns {
		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)

@@ -52,7 +52,8 @@ type Interface interface {
// ClientContentConfig controls how RESTClient communicates with the server.
//
// TODO: ContentConfig will be updated to accept a Negotiator instead of a
// NegotiatedSerializer and NegotiatedSerializer will be removed.
type ClientContentConfig struct {
	// AcceptContentTypes specifies the types the client will accept and is optional.
	// If not set, ContentType will be used to define the Accept header

@@ -159,13 +160,14 @@ func readExpBackoffConfig() BackoffManager {
// c, err := NewRESTClient(...)
// if err != nil { ... }
// resp, err := c.Verb("GET").
//	Path("pods").
//	SelectorParam("labels", "area=staging").
//	Timeout(10*time.Second).
//	Do()
//
// if err != nil { ... }
// list, ok := resp.(*api.PodList)
//
func (c *RESTClient) Verb(verb string) *Request {
	return NewRequest(c).Verb(verb)
}

@@ -36,9 +36,10 @@ type AuthProvider interface {
}

// Factory generates an AuthProvider plugin.
//
// clusterAddress is the address of the current cluster.
// config is the initial configuration for this plugin.
// persister allows the plugin to save updated configuration.
type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)

// AuthProviderConfigPersister allows a plugin to persist configuration info

@@ -917,8 +917,8 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
// processing.
//
// Error type:
-// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
-// * http.Client.Do errors are returned directly.
+// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+// - http.Client.Do errors are returned directly.
func (r *Request) Do(ctx context.Context) Result {
	var result Result
	err := r.request(ctx, func(req *http.Request, resp *http.Response) {

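The two error classes called out above are what callers branch on in practice. As an illustrative sketch (not part of this commit; the client, pod name, and namespace are assumed), server-side statuses surface as *errors.StatusError and can be classified with the apimachinery helpers, while transport failures from http.Client.Do come back unchanged:

	import (
		"context"

		apierrors "k8s.io/apimachinery/pkg/api/errors"
		"k8s.io/client-go/rest"
	)

	func checkPod(ctx context.Context, c *rest.RESTClient) error {
		result := c.Get().
			Namespace("default").
			Resource("pods").
			Name("missing-pod").
			Do(ctx)
		if err := result.Error(); err != nil {
			if apierrors.IsNotFound(err) {
				// A status returned by the server (*errors.StatusError).
				return nil
			}
			// http.Client.Do errors are returned directly (timeouts, connection failures, ...).
			return err
		}
		return nil
	}
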
@@ -1085,15 +1085,15 @@ const maxUnstructuredResponseTextBytes = 2048
// unexpected responses. The rough structure is:
//
// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
//    - this is the happy path
//    - when you get this output, trust what the server sends
// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
//    generate a reasonable facsimile of the original failure.
//    - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
//    initial contact, the presence of mismatched body contents from posted content types
//    - Give these a separate distinct error type and capture as much as possible of the original message
//
// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {

@@ -40,9 +40,9 @@ var (

// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered.
// By default, warnings are logged. Several built-in implementations are provided:
//   - NoWarnings suppresses warnings.
//   - WarningLogger logs warnings.
//   - NewWarningWriter() outputs warnings to the provided writer.
func SetDefaultWarningHandler(l WarningHandler) {
	defaultWarningHandlerLock.Lock()
	defer defaultWarningHandlerLock.Unlock()

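For illustration only (not part of this diff), one of the built-in handlers listed above can be installed once at program start; this sketch assumes the usual k8s.io/client-go/rest import and routes deduplicated warnings to stderr:

	import (
		"os"

		"k8s.io/client-go/rest"
	)

	func init() {
		// Replace the default klog-based handler with a writer that
		// deduplicates repeated warning messages.
		rest.SetDefaultWarningHandler(
			rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{Deduplicate: true}),
		)
	}
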
@@ -45,20 +45,20 @@ client.Client from an authcfg.Info.

Example:

	import (
		"pkg/client"
		"pkg/client/auth"
	)

	info, err := auth.LoadFromFile(filename)
	if err != nil {
		// handle error
	}
	clientConfig = client.Config{}
	clientConfig.Host = "example.com:4901"
	clientConfig = info.MergeWithConfig()
	client := client.New(clientConfig)
	client.Pods(ns).List()
*/
package auth

tools/cache/controller.go
@@ -199,17 +199,17 @@ func (c *controller) processLoop() {
// can't return an error. The handlers MUST NOT modify the objects
// received; this concerns not only the top level of structure but all
// the data structures reachable from it.
-// * OnAdd is called when an object is added.
-// * OnUpdate is called when an object is modified. Note that oldObj is the
-// last known state of the object-- it is possible that several changes
-// were combined together, so you can't use this to see every single
-// change. OnUpdate is also called when a re-list happens, and it will
-// get called even if nothing changed. This is useful for periodically
-// evaluating or syncing something.
-// * OnDelete will get the final state of the item if it is known, otherwise
-// it will get an object of type DeletedFinalStateUnknown. This can
-// happen if the watch is closed and misses the delete event and we don't
-// notice the deletion until the subsequent re-list.
+// - OnAdd is called when an object is added.
+// - OnUpdate is called when an object is modified. Note that oldObj is the
+// last known state of the object-- it is possible that several changes
+// were combined together, so you can't use this to see every single
+// change. OnUpdate is also called when a re-list happens, and it will
+// get called even if nothing changed. This is useful for periodically
+// evaluating or syncing something.
+// - OnDelete will get the final state of the item if it is known, otherwise
+// it will get an object of type DeletedFinalStateUnknown. This can
+// happen if the watch is closed and misses the delete event and we don't
+// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
	OnAdd(obj interface{})
	OnUpdate(oldObj, newObj interface{})

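The contract above is usually satisfied with cache.ResourceEventHandlerFuncs rather than a hand-written type. A minimal editorial sketch that follows the rules spelled out in the comment:

	handler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// Do not modify obj; it is shared with the informer's cache.
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			// oldObj is only the last known state; several changes may have
			// been combined, and re-lists also trigger this callback.
		},
		DeleteFunc: func(obj interface{}) {
			// obj may be a cache.DeletedFinalStateUnknown tombstone when the
			// delete event was missed and only noticed on the next re-list.
			if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
				_ = tombstone.Key
			}
		},
	}
	_ = handler
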
@@ -305,15 +305,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
// notifications to be faulty.
//
// Parameters:
-// * lw is list and watch functions for the source of the resource you want to
-// be informed of.
-// * objType is an object of the type that you expect to receive.
-// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-// calls, even if nothing changed). Otherwise, re-list will be delayed as
-// long as possible (until the upstream source closes the watch or times out,
-// or you stop the controller).
-// * h is the object you want notifications sent to.
-//
+// - lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// - objType is an object of the type that you expect to receive.
+// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// - h is the object you want notifications sent to.
func NewInformer(
	lw ListerWatcher,
	objType runtime.Object,

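Wiring those parameters together looks roughly like the sketch below (editorial; clientset, handler, and stopCh are assumed, as are the corev1, metav1, fields, cache, and time imports):

	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), // lw: list and watch functions for the source
		"pods", metav1.NamespaceAll, fields.Everything(),
	)
	store, controller := cache.NewInformer(
		lw,
		&corev1.Pod{},  // objType: the type you expect to receive
		30*time.Second, // resyncPeriod: non-zero means periodic OnUpdate calls
		handler,        // h: where notifications are sent
	)
	go controller.Run(stopCh)
	_ = store // kept up to date with the watched objects
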
@@ -332,16 +331,15 @@ func NewInformer(
// notifications to be faulty.
//
// Parameters:
-// * lw is list and watch functions for the source of the resource you want to
-// be informed of.
-// * objType is an object of the type that you expect to receive.
-// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-// calls, even if nothing changed). Otherwise, re-list will be delayed as
-// long as possible (until the upstream source closes the watch or times out,
-// or you stop the controller).
-// * h is the object you want notifications sent to.
-// * indexers is the indexer for the received object type.
-//
+// - lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// - objType is an object of the type that you expect to receive.
+// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// - h is the object you want notifications sent to.
+// - indexers is the indexer for the received object type.
func NewIndexerInformer(
	lw ListerWatcher,
	objType runtime.Object,

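The indexer variant differs only in the extra final parameter; an illustrative call (reusing the assumed lw and handler from the sketch above) that indexes received objects by namespace:

	indexer, controller := cache.NewIndexerInformer(
		lw, &corev1.Pod{}, 0, handler,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	_, _ = indexer, controller
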
@@ -454,16 +452,15 @@ func processDeltas(
// providing event notifications.
//
// Parameters
-// * lw is list and watch functions for the source of the resource you want to
-// be informed of.
-// * objType is an object of the type that you expect to receive.
-// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-// calls, even if nothing changed). Otherwise, re-list will be delayed as
-// long as possible (until the upstream source closes the watch or times out,
-// or you stop the controller).
-// * h is the object you want notifications sent to.
-// * clientState is the store you want to populate
-//
+// - lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// - objType is an object of the type that you expect to receive.
+// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// - h is the object you want notifications sent to.
+// - clientState is the store you want to populate
func newInformer(
	lw ListerWatcher,
	objType runtime.Object,

tools/cache/delta_fifo.go
@@ -74,11 +74,11 @@ type DeltaFIFOOptions struct {
// the Pop() method.
//
// DeltaFIFO solves this use case:
-// * You want to process every object change (delta) at most once.
-// * When you process an object, you want to see everything
-// that's happened to it since you last processed it.
-// * You want to process the deletion of some of the objects.
-// * You might want to periodically reprocess objects.
+// - You want to process every object change (delta) at most once.
+// - When you process an object, you want to see everything
+// that's happened to it since you last processed it.
+// - You want to process the deletion of some of the objects.
+// - You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but they

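A construction sketch (editorial; signatures as they stand at this version of client-go), showing how Pop hands the accumulated Deltas for one key to the processing function:

	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction:  cache.MetaNamespaceKeyFunc,
		KnownObjects: knownObjects, // e.g. the Indexer backing your listers (assumed to exist)
	})
	_ = fifo.Add(obj) // obj is assumed to exist
	item, err := fifo.Pop(func(popped interface{}) error {
		for _, d := range popped.(cache.Deltas) {
			// d.Type is Added/Updated/Deleted/Replaced/Sync; d.Object is the object state.
			_ = d
		}
		return nil
	})
	_, _ = item, err
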
@@ -179,21 +179,21 @@ type Deltas []Delta
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
//
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
//

tools/cache/expiration_cache.go
@@ -25,13 +25,14 @@ import (
)

// ExpirationCache implements the store interface
//  1. All entries are automatically time stamped on insert
//     a. The key is computed based off the original item/keyFunc
//     b. The value inserted under that key is the timestamped item
//  2. Expiration happens lazily on read based on the expiration policy
//     a. No item can be inserted into the store while we're expiring
//     *any* item in the cache.
//  3. Time-stamps are stripped off unexpired entries before return
//
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.

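The usual way to obtain one is the TTL-policy constructor; a small illustrative sketch (pod is assumed to exist):

	store := cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 10*time.Minute)
	_ = store.Add(pod)
	// Expiration happens lazily on read: entries older than the TTL are dropped
	// on access and the timestamps are stripped before the item is returned.
	obj, exists, err := store.GetByKey("default/example-pod")
	_, _, _ = obj, exists, err
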
tools/cache/fifo.go
@@ -103,10 +103,11 @@ func Pop(queue Queue) interface{} {
// recent version will be processed. This can't be done with a channel
//
// FIFO solves this use case:
-// * You want to process every object (exactly) once.
-// * You want to process the most recent version of the object when you process it.
-// * You do not want to process deleted objects, they should be removed from the queue.
-// * You do not want to periodically reprocess objects.
+// - You want to process every object (exactly) once.
+// - You want to process the most recent version of the object when you process it.
+// - You do not want to process deleted objects, they should be removed from the queue.
+// - You do not want to periodically reprocess objects.
//
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
	lock sync.RWMutex

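For illustration, constructing and draining a FIFO with the package-level Pop helper named in the hunk header (pod is assumed to exist):

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	_ = queue.Add(pod)
	// Pop blocks until an item is available and returns the most recent
	// version of that object exactly once.
	item := cache.Pop(queue)
	_ = item
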
tools/cache/index.go
@@ -28,10 +28,10 @@ import (
// Delete).
//
// There are three kinds of strings here:
//  1. a storage key, as defined in the Store interface,
//  2. a name of an index, and
//  3. an "indexed value", which is produced by an IndexFunc and
//     can be a field value or any other string computed from the object.
type Indexer interface {
	Store
	// Index returns the stored objects whose set of indexed values

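The three kinds of strings are easiest to see in a concrete, editorial sketch: the storage key comes from MetaNamespaceKeyFunc, the index name is "byNode", and the indexed value is the pod's node name (pod is assumed to exist):

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byNode": func(obj interface{}) ([]string, error) {
			pod := obj.(*corev1.Pod)
			return []string{pod.Spec.NodeName}, nil // the "indexed value"
		},
	})
	_ = indexer.Add(pod)
	// All stored objects whose "byNode" indexed value is "node-1".
	onNode, err := indexer.ByIndex("byNode", "node-1")
	_, _ = onNode, err
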
@@ -160,8 +160,10 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {

// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
//
// if the ExplicitPath, return the unmerged explicit file
// Otherwise, return a merged config based on the Precedence slice
//
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.

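A typical, illustrative use of these rules, turning the merged result into a rest.Config (the kubeconfig path shown is hypothetical):

	func loadKubeconfig() (*rest.Config, error) {
		rules := clientcmd.NewDefaultClientConfigLoadingRules()
		// rules.ExplicitPath = "/path/to/kubeconfig" // optional: load a single, unmerged file
		rawConfig, err := rules.Load() // merged according to the precedence rules above
		if err != nil {
			// A missing ExplicitPath file is an error; missing precedence files are ignored.
			return nil, err
		}
		return clientcmd.NewDefaultClientConfig(*rawConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
	}
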
@@ -161,7 +161,7 @@ type LeaderElectionConfig struct {
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
-// * OnChallenge()
+// - OnChallenge()
type LeaderCallbacks struct {
	// OnStartedLeading is called when a LeaderElector client starts leading
	OnStartedLeading func(context.Context)

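For reference, an editorial sketch of how the callbacks are usually filled in when building a leaderelection.LeaderElectionConfig:

	callbacks := leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			// We became the leader: start work and return when ctx is cancelled.
		},
		OnStoppedLeading: func() {
			// Leadership was lost: stop doing work, often by exiting.
		},
		OnNewLeader: func(identity string) {
			// Another participant (possibly ourselves) was observed as leader.
		},
	}
	_ = callbacks
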
@@ -62,18 +62,18 @@ type ForwardedPort struct {
}

/*
valid port specifications:

	5000
	- forwards from localhost:5000 to pod:5000

	8888:5000
	- forwards from localhost:8888 to pod:5000

	0:5000
	:5000
	- selects a random available local port,
	  forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
	var forwards []ForwardedPort

@@ -235,10 +235,10 @@ type aggregateRecord struct {
// EventAggregate checks if a similar event has been seen according to the
// aggregation configuration (max events, max interval, etc) and returns:
//
//   - The (potentially modified) event that should be created
//   - The cache key for the event, for correlation purposes. This will be set to
//     the full key for normal events, and to the result of
//     EventAggregatorMessageFunc for aggregate events.
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
	now := metav1.NewTime(e.clock.Now())
	var record aggregateRecord

@@ -427,14 +427,14 @@ type EventCorrelateResult struct {
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
-// * Aggregation is performed if a similar event is recorded 10 times
+// - Aggregation is performed if a similar event is recorded 10 times
// in a 10 minute rolling interval. A similar event is an event that varies only by
// the Event.Message field. Rather than recording the precise event, aggregation
// will create a new event whose message reports that it has combined events with
// the same reason.
-// * Events are incrementally counted if the exact same event is encountered multiple
+// - Events are incrementally counted if the exact same event is encountered multiple
// times.
-// * A source may burst 25 events about an object, but has a refill rate budget
+// - A source may burst 25 events about an object, but has a refill rate budget
// per object of 1 event every 5 minutes to control long-tail of spam.
func NewEventCorrelator(clock clock.PassiveClock) *EventCorrelator {
	cacheSize := maxLruCacheEntries

@@ -101,7 +101,9 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions
// It guarantees you to see all events and in the order they happened.
// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
//
// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
	w, err := NewRetryWatcher(initialResourceVersion, watcherClient)

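An illustrative use of Until with a cache.ListWatch as the watcherClient (podClient, ctx, and resourceVersion are assumed to exist); the condition reports true once the awaited edge is seen:

	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return podClient.Watch(ctx, options)
		},
	}
	ev, err := watchtools.Until(ctx, resourceVersion, lw, func(event watch.Event) (bool, error) {
		return event.Type == watch.Deleted, nil
	})
	_, _ = ev, err
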
@@ -478,7 +478,7 @@ func isBool(s string) bool {
	return s == "true" || s == "false"
}

-//UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string
+// UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string
func UnquoteExtend(s string) (string, error) {
	n := len(s)
	if n < 2 {

@@ -74,30 +74,30 @@ func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error)
// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times
// and gives up, RetryOnConflict will return an error to the caller.
//
//	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
//		// Fetch the resource here; you need to refetch it on every try, since
//		// if you got a conflict on the last update attempt then you need to get
//		// the current version before making your own changes.
//		pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
//		if err != nil {
//			return err
//		}
//
//		// Make whatever updates to the resource are needed
//		pod.Status.Phase = v1.PodFailed
//
//		// Try to update
//		_, err = c.Pods("mynamespace").UpdateStatus(pod)
//		// You have to return err itself here (not wrapped inside another error)
//		// so that RetryOnConflict can identify it correctly.
//		return err
//	})
//	if err != nil {
//		// May be conflict if max retries were hit, or may be something unrelated
//		// like permissions or a network error
//		return err
//	}
//	...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {

@@ -42,8 +42,9 @@ type FakeOpenAPIServer struct {
// API server.
//
// specsPath - Give a path to some test data organized so that each GroupVersion
// has its own OpenAPI V3 JSON file.
// i.e. apps/v1beta1 is stored in <specsPath>/apps/v1beta1.json
func NewFakeOpenAPIV3Server(specsPath string) (*FakeOpenAPIServer, error) {
	mux := &testMux{
		counts: map[string]int{},

@@ -16,11 +16,11 @@ limitations under the License.

// Package workqueue provides a simple queue that supports the following
// features:
-// * Fair: items processed in the order in which they are added.
-// * Stingy: a single item will not be processed multiple times concurrently,
-// and if an item is added multiple times before it can be processed, it
-// will only be processed once.
-// * Multiple consumers and producers. In particular, it is allowed for an
-// item to be reenqueued while it is being processed.
-// * Shutdown notifications.
+// - Fair: items processed in the order in which they are added.
+// - Stingy: a single item will not be processed multiple times concurrently,
+// and if an item is added multiple times before it can be processed, it
+// will only be processed once.
+// - Multiple consumers and producers. In particular, it is allowed for an
+// item to be reenqueued while it is being processed.
+// - Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"

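To make the feature list concrete, an editorial sketch of the basic produce/consume loop with the plain (non-rate-limited) queue:

	q := workqueue.New()
	q.Add("example-key")

	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			// Re-adds of the same key while it is in flight are coalesced (Stingy),
			// and items come out in insertion order (Fair).
			q.Done(item)
		}
	}()

	q.ShutDown()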