From 6c274ea72d1027592712aaacc407fd655929cae4 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Fri, 31 Jan 2020 13:58:23 -0800 Subject: [PATCH 1/4] update client gen --- .../fake/generator_fake_for_type.go | 17 +++--- .../generators/generator_for_type.go | 53 +++++++++++-------- 2 files changed, 42 insertions(+), 28 deletions(-) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index 737c5abf9a9..e3c03d6e3f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -122,9 +122,12 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. "group": canonicalGroup, "groupName": groupName, "version": g.version, + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), "GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}), "GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}), @@ -392,7 +395,7 @@ func (c *Fake$.type|publicPlural$) DeleteCollection(ctx context.Context, options ` var createTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewCreateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) $else$Invokes($.NewRootCreateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ @@ -405,7 +408,7 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|priva var createSubresourceTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. 
-func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${}) $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ @@ -418,7 +421,7 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Na var updateTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewUpdateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) $else$Invokes($.NewRootUpdateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ @@ -431,7 +434,7 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|priva var updateSubresourceTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$), &$.inputType|raw${}) $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ @@ -445,7 +448,7 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Na var updateStatusTemplate = ` // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *Fake$.type|publicPlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$) (*$.type|raw$, error) { +func (c *Fake$.type|publicPlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error) { obj, err := c.Fake. 
$if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$), &$.type|raw${}) $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$), &$.type|raw${})$end$ @@ -467,7 +470,7 @@ func (c *Fake$.type|publicPlural$) Watch(ctx context.Context, opts $.ListOptions var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. -func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { +func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... ), &$.resultType|raw${}) $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go index a2ccdbf03ac..9aa419a00ac 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -43,7 +43,9 @@ type genClientForType struct { var _ generator.Generator = &genClientForType{} // Filter ignores all but one type because we're making a single file per type. -func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } +func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { + return t == g.typeToMatch +} func (g *genClientForType) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ @@ -116,9 +118,10 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i "type": t, "inputType": &inputType, "resultType": &resultType, - "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), }, }) @@ -135,9 +138,12 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i "subresourcePath": "", "GroupGoName": g.groupGoName, "Version": namer.IC(g.version), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: 
"k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), @@ -304,22 +310,22 @@ func generateInterface(tags util.Tags) string { } var subresourceDefaultVerbTemplates = map[string]string{ - "create": `Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "create": `Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, "list": `List(ctx context.Context, $.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, - "update": `Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "update": `Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, "get": `Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, } var defaultVerbTemplates = map[string]string{ - "create": `Create(context.Context, *$.inputType|raw$) (*$.resultType|raw$, error)`, - "update": `Update(context.Context, *$.inputType|raw$) (*$.resultType|raw$, error)`, - "updateStatus": `UpdateStatus(context.Context, *$.type|raw$) (*$.type|raw$, error)`, - "delete": `Delete(ctx context.Context, name string, options *$.DeleteOptions|raw$) error`, - "deleteCollection": `DeleteCollection(ctx context.Context, options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error`, - "get": `Get(ctx context.Context, name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, + "create": `Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, + "update": `Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, + "updateStatus": `UpdateStatus(ctx context.Context, $.inputType|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error)`, + "delete": `Delete(ctx context.Context, name string, opts *$.DeleteOptions|raw$) error`, + "deleteCollection": `DeleteCollection(ctx context.Context, opts *$.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error`, + "get": `Get(ctx context.Context, name string, opts $.GetOptions|raw$) (*$.resultType|raw$, error)`, "list": `List(ctx context.Context, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, "watch": `Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error)`, - "patch": `Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error)`, + "patch": 
`Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error)`, } // group client will implement this interface. @@ -488,13 +494,14 @@ func (c *$.type|privatePlural$) DeleteCollection(ctx context.Context, options *$ var createSubresourceTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.client.Post(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.inputType|private$). Do(ctx). Into(result) @@ -504,11 +511,12 @@ func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name var createTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.client.Post(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.inputType|private$). Do(ctx). Into(result) @@ -518,13 +526,14 @@ func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ var updateSubresourceTemplate = ` // Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. -func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.client.Put(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.inputType|private$). Do(ctx). Into(result) @@ -534,12 +543,13 @@ func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name var updateTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. 
-func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.client.Put(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). Name($.inputType|private$.Name). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.inputType|private$). Do(ctx). Into(result) @@ -550,14 +560,14 @@ func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ var updateStatusTemplate = ` // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *$.type|privatePlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$) (result *$.type|raw$, err error) { +func (c *$.type|privatePlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (result *$.type|raw$, err error) { result = &$.type|raw${} err = c.client.Put(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). Name($.type|private$.Name). SubResource("status"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.type|private$). Do(ctx). Into(result) @@ -584,13 +594,14 @@ func (c *$.type|privatePlural$) Watch(ctx context.Context, opts $.ListOptions|ra var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. -func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { +func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} err = c.client.Patch(pt). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, $.schemeParameterCodec|raw$). Body(data). Do(ctx). 
Into(result) From 25651408aeadf38c3df7ea8c760e7519fd37d625 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 8 Feb 2020 12:30:21 -0500 Subject: [PATCH 2/4] generated: run refactor --- cmd/kubeadm/app/phases/addons/dns/dns.go | 10 +-- cmd/kubeadm/app/phases/addons/dns/dns_test.go | 4 +- .../app/phases/certs/renewal/apirenewer.go | 2 +- cmd/kubeadm/app/phases/upgrade/health.go | 2 +- cmd/kubeadm/app/util/apiclient/idempotency.go | 44 +++++----- .../app/util/apiclient/idempotency_test.go | 4 +- cmd/kubeadm/app/util/config/cluster_test.go | 4 +- pkg/client/tests/fake_client_test.go | 12 +-- pkg/controller/bootstrap/bootstrapsigner.go | 3 +- pkg/controller/certificates/approver/BUILD | 1 + .../certificates/approver/sarapprove.go | 3 +- .../rootcacertpublisher/publisher.go | 4 +- pkg/controller/certificates/signer/BUILD | 1 + pkg/controller/certificates/signer/signer.go | 3 +- pkg/controller/client_builder.go | 2 +- pkg/controller/client_builder_dynamic.go | 3 +- pkg/controller/cloud/node_controller.go | 2 +- .../clusterroleaggregation_controller.go | 2 +- pkg/controller/controller_utils.go | 16 ++-- pkg/controller/cronjob/injection.go | 8 +- pkg/controller/daemon/daemon_controller.go | 2 +- pkg/controller/daemon/update.go | 10 +-- .../deployment/deployment_controller.go | 2 +- pkg/controller/deployment/progress.go | 3 +- pkg/controller/deployment/rollback.go | 3 +- pkg/controller/deployment/sync.go | 18 ++--- pkg/controller/disruption/disruption.go | 2 +- pkg/controller/disruption/disruption_test.go | 4 +- .../endpoint/endpoints_controller.go | 4 +- .../endpointslice_controller_test.go | 6 +- pkg/controller/endpointslice/reconciler.go | 4 +- .../endpointslice/reconciler_test.go | 4 +- pkg/controller/history/controller_history.go | 8 +- .../history/controller_history_test.go | 2 +- pkg/controller/job/job_controller.go | 2 +- .../deletion/namespaced_resources_deleter.go | 4 +- pkg/controller/nodeipam/ipam/adapter.go | 2 +- .../node_lifecycle_controller.go | 2 +- pkg/controller/podautoscaler/horizontal.go | 2 +- pkg/controller/podgc/gc_controller_test.go | 2 +- pkg/controller/replicaset/replica_set_test.go | 4 +- .../replicaset/replica_set_utils.go | 2 +- .../resource_quota_controller.go | 2 +- pkg/controller/service/BUILD | 1 + pkg/controller/service/controller_test.go | 10 +-- pkg/controller/service/patch.go | 3 +- pkg/controller/service/patch_test.go | 2 +- .../serviceaccounts_controller.go | 2 +- .../serviceaccount/tokens_controller.go | 8 +- .../statefulset/stateful_pod_control.go | 7 +- .../stateful_set_status_updater.go | 3 +- pkg/controller/ttl/BUILD | 1 + pkg/controller/ttl/ttl_controller.go | 3 +- pkg/controller/util/node/controller_utils.go | 4 +- .../attach_detach_controller_test.go | 4 +- .../volume/expand/cache/volume_resize_map.go | 2 +- .../volume/persistentvolume/pv_controller.go | 14 ++-- .../persistentvolume/pv_controller_base.go | 6 +- .../pvc_protection_controller.go | 4 +- pkg/controller/volume/pvprotection/BUILD | 1 + .../pvprotection/pv_protection_controller.go | 5 +- .../volume/scheduling/scheduler_binder.go | 4 +- .../scheduling/scheduler_binder_test.go | 16 ++-- pkg/kubectl/cmd/auth/BUILD | 1 + pkg/kubectl/cmd/auth/cani.go | 5 +- pkg/kubelet/kubelet_node_status.go | 2 +- pkg/kubelet/kubelet_node_status_test.go | 2 +- pkg/kubelet/kubeletconfig/configsync.go | 2 +- pkg/kubelet/nodelease/controller.go | 4 +- pkg/kubelet/pod/mirror_client.go | 2 +- pkg/kubelet/token/BUILD | 1 + pkg/kubelet/token/token_manager.go | 3 +- .../volumemanager/volume_manager_test.go | 2 +- 
pkg/kubemark/controller.go | 2 +- pkg/master/client_util.go | 2 +- pkg/master/controller.go | 4 +- ...cluster_authentication_trust_controller.go | 6 +- pkg/master/master_test.go | 4 +- pkg/master/reconcilers/endpointsadapter.go | 10 +-- .../reconcilers/endpointsadapter_test.go | 8 +- pkg/master/reconcilers/lease_test.go | 6 +- pkg/registry/core/pod/storage/eviction.go | 2 +- pkg/registry/flowcontrol/rest/BUILD | 1 + .../flowcontrol/rest/storage_flowcontrol.go | 12 +-- .../rest/storage_flowcontrol_test.go | 3 +- .../reconciliation/clusterrole_interfaces.go | 4 +- .../clusterrolebinding_interfaces.go | 4 +- pkg/registry/rbac/reconciliation/namespace.go | 2 +- .../rbac/reconciliation/role_interfaces.go | 4 +- .../reconciliation/rolebinding_interfaces.go | 4 +- pkg/registry/rbac/rest/storage_rbac.go | 4 +- .../scheduling/rest/storage_scheduling.go | 2 +- pkg/scheduler/scheduler.go | 4 +- pkg/util/node/node.go | 6 +- pkg/util/pod/pod.go | 2 +- pkg/util/pod/pod_test.go | 2 +- pkg/volume/azure_file/azure_util.go | 2 +- pkg/volume/csi/csi_attacher.go | 3 +- pkg/volume/csi/csi_attacher_test.go | 20 ++--- pkg/volume/csi/csi_block_test.go | 7 +- pkg/volume/csi/csi_mounter_test.go | 11 +-- .../csi/nodeinfomanager/nodeinfomanager.go | 8 +- pkg/volume/glusterfs/glusterfs.go | 6 +- pkg/volume/storageos/storageos_test.go | 2 +- pkg/volume/testing/testing.go | 2 +- .../util/recyclerclient/recycler_client.go | 2 +- pkg/volume/util/resize_util.go | 4 +- .../namespace/autoprovision/admission.go | 2 +- .../resourcequota/resource_access.go | 2 +- .../pkg/controller/apiapproval/BUILD | 1 + .../apiapproval/apiapproval_controller.go | 3 +- .../pkg/controller/establish/BUILD | 1 + .../establish/establishing_controller.go | 3 +- .../pkg/controller/finalizer/BUILD | 1 + .../pkg/controller/finalizer/crd_finalizer.go | 7 +- .../nonstructuralschema_controller.go | 2 +- .../pkg/controller/status/BUILD | 1 + .../controller/status/naming_controller.go | 3 +- .../test/integration/basic_test.go | 4 +- .../test/integration/change_test.go | 2 +- .../integration/conversion/conversion_test.go | 8 +- .../test/integration/defaulting_test.go | 2 +- .../test/integration/fixtures/resources.go | 4 +- .../test/integration/helpers.go | 4 +- .../test/integration/table_test.go | 4 +- .../test/integration/validation_test.go | 8 +- .../test/integration/versioning_test.go | 2 +- .../create-update-delete-deployment/main.go | 4 +- .../examples/fake-client/main_test.go | 2 +- .../resourcelock/configmaplock.go | 4 +- .../resourcelock/endpointslock.go | 4 +- .../leaderelection/resourcelock/leaselock.go | 4 +- .../client-go/util/certificate/csr/csr.go | 2 +- .../cloud-provider/node/helpers/labels.go | 2 +- .../cloud-provider/node/helpers/taints.go | 2 +- .../cloud-provider/service/helpers/BUILD | 1 + .../cloud-provider/service/helpers/helper.go | 3 +- .../service/helpers/helper_test.go | 2 +- .../autoregister/autoregister_controller.go | 4 +- .../status/available_controller.go | 2 +- .../k8s.io/kubectl/pkg/cmd/autoscale/BUILD | 1 + .../kubectl/pkg/cmd/autoscale/autoscale.go | 3 +- .../pkg/cmd/create/create_clusterrole.go | 2 +- .../kubectl/pkg/cmd/create/create_cronjob.go | 2 +- .../kubectl/pkg/cmd/create/create_job.go | 2 +- .../kubectl/pkg/cmd/create/create_role.go | 2 +- .../pkg/cmd/rollingupdate/rolling_updater.go | 8 +- .../pkg/cmd/rollingupdate/rollingupdate.go | 2 +- .../src/k8s.io/kubectl/pkg/drain/cordon.go | 5 +- .../pkg/polymorphichelpers/history_test.go | 2 +- .../pkg/polymorphichelpers/rollback.go | 6 +- .../azure/azure_config_test.go 
| 2 +- .../gce/gce_clusterid.go | 2 +- .../gce/gce_loadbalancer_internal_test.go | 48 +++++------ .../gce/gce_loadbalancer_test.go | 7 +- .../legacy-cloud-providers/gce/gce_util.go | 3 +- .../gce/gce_util_test.go | 2 +- .../k8s.io/sample-controller/controller.go | 6 +- test/e2e/apimachinery/aggregator.go | 18 ++--- test/e2e/apimachinery/chunking.go | 2 +- .../apimachinery/crd_conversion_webhook.go | 8 +- test/e2e/apimachinery/crd_publish_openapi.go | 4 +- .../custom_resource_definition.go | 8 +- test/e2e/apimachinery/garbage_collector.go | 32 ++++---- test/e2e/apimachinery/generated_clientset.go | 4 +- test/e2e/apimachinery/namespace.go | 6 +- test/e2e/apimachinery/resource_quota.go | 80 +++++++++---------- test/e2e/apimachinery/table_conversion.go | 4 +- test/e2e/apimachinery/watch.go | 14 ++-- test/e2e/apimachinery/webhook.go | 76 +++++++++--------- test/e2e/apps/cronjob.go | 2 +- test/e2e/apps/daemon_set.go | 28 +++---- test/e2e/apps/deployment.go | 26 +++--- test/e2e/apps/disruption.go | 10 +-- test/e2e/apps/network_partition.go | 8 +- test/e2e/apps/rc.go | 14 ++-- test/e2e/apps/replica_set.go | 10 +-- test/e2e/apps/statefulset.go | 28 +++---- test/e2e/auth/audit.go | 20 ++--- test/e2e/auth/audit_dynamic.go | 10 +-- test/e2e/auth/certificates.go | 2 +- test/e2e/auth/node_authn.go | 2 +- test/e2e/auth/node_authz.go | 8 +- test/e2e/auth/pod_security_policy.go | 12 +-- test/e2e/auth/service_accounts.go | 18 ++--- .../cluster_autoscaler_scalability.go | 2 +- .../autoscaling/cluster_size_autoscaling.go | 12 +-- .../custom_metrics_stackdriver_autoscaling.go | 6 +- test/e2e/autoscaling/dns_autoscaling.go | 2 +- test/e2e/cloud/gcp/resize_nodes.go | 2 +- test/e2e/common/configmap.go | 14 ++-- test/e2e/common/configmap_volume.go | 34 ++++---- test/e2e/common/lease.go | 8 +- test/e2e/common/pods.go | 8 +- test/e2e/common/podtemplates.go | 4 +- test/e2e/common/projected_combined.go | 4 +- test/e2e/common/projected_configmap.go | 18 ++--- test/e2e/common/projected_secret.go | 16 ++-- test/e2e/common/runtime.go | 3 +- test/e2e/common/runtimeclass.go | 4 +- test/e2e/common/secrets.go | 10 +-- test/e2e/common/secrets_volume.go | 30 +++---- test/e2e/common/sysctl.go | 2 +- test/e2e/common/util.go | 4 +- test/e2e/e2e.go | 2 +- test/e2e/framework/auth/helpers.go | 6 +- .../autoscaling/autoscaling_utils.go | 6 +- test/e2e/framework/deployment/fixtures.go | 2 +- test/e2e/framework/framework.go | 4 +- test/e2e/framework/ingress/ingress_utils.go | 14 ++-- test/e2e/framework/job/rest.go | 2 +- test/e2e/framework/network/utils.go | 2 +- test/e2e/framework/pod/create.go | 6 +- test/e2e/framework/pod/resource.go | 2 +- test/e2e/framework/pods.go | 4 +- test/e2e/framework/psp.go | 4 +- test/e2e/framework/pv/pv.go | 4 +- test/e2e/framework/security/apparmor.go | 4 +- test/e2e/framework/service/jig.go | 18 ++--- test/e2e/framework/service/resource.go | 2 +- test/e2e/framework/statefulset/rest.go | 6 +- test/e2e/framework/util.go | 2 +- test/e2e/framework/volume/fixtures.go | 6 +- .../monitoring/custom_metrics_stackdriver.go | 8 +- test/e2e/kubectl/portforward.go | 8 +- .../lifecycle/bootstrap/bootstrap_signer.go | 10 +-- .../bootstrap/bootstrap_token_cleaner.go | 4 +- test/e2e/network/dns.go | 20 ++--- test/e2e/network/dns_common.go | 14 ++-- test/e2e/network/dns_configmap.go | 5 +- test/e2e/network/dual_stack.go | 4 +- test/e2e/network/endpointslice.go | 2 +- test/e2e/network/firewall.go | 3 +- test/e2e/network/fixture.go | 6 +- test/e2e/network/ingress.go | 20 ++--- test/e2e/network/kube_proxy.go | 6 +- 
test/e2e/network/network_policy.go | 68 ++++++++-------- test/e2e/network/networking.go | 2 +- test/e2e/network/no_snat.go | 4 +- test/e2e/network/proxy.go | 2 +- test/e2e/network/scale/ingress.go | 8 +- .../network/scale/localrun/ingress_scale.go | 2 +- test/e2e/network/service.go | 12 +-- test/e2e/network/service_latency.go | 2 +- test/e2e/node/events.go | 2 +- test/e2e/node/kubelet.go | 2 +- test/e2e/node/pod_gc.go | 4 +- test/e2e/node/pre_stop.go | 4 +- test/e2e/node/runtimeclass.go | 6 +- test/e2e/node/security_context.go | 4 +- test/e2e/node/ttlafterfinished.go | 2 +- test/e2e/scheduling/limit_range.go | 18 ++--- test/e2e/scheduling/nvidia-gpus.go | 2 +- test/e2e/scheduling/predicates.go | 4 +- test/e2e/scheduling/preemption.go | 12 +-- test/e2e/scheduling/priorities.go | 6 +- test/e2e/scheduling/ubernetes_lite.go | 4 +- test/e2e/scheduling/ubernetes_lite_volumes.go | 2 +- test/e2e/servicecatalog/podpreset.go | 2 +- test/e2e/storage/csi_mock_volume.go | 6 +- test/e2e/storage/detach_mounted.go | 2 +- test/e2e/storage/drivers/csi_objects.go | 2 +- test/e2e/storage/drivers/in_tree.go | 6 +- test/e2e/storage/empty_dir_wrapper.go | 10 +-- test/e2e/storage/ephemeral_volume.go | 2 +- .../flexvolume_mounted_volume_resize.go | 4 +- test/e2e/storage/flexvolume_online_resize.go | 6 +- .../generic_persistent_volume-disruptive.go | 3 +- test/e2e/storage/mounted_volume_resize.go | 4 +- .../nfs_persistent_volume-disruptive.go | 2 +- test/e2e/storage/pd.go | 12 +-- test/e2e/storage/persistent_volumes-local.go | 12 +-- test/e2e/storage/persistent_volumes.go | 10 +-- test/e2e/storage/pv_protection.go | 4 +- test/e2e/storage/pvc_protection.go | 2 +- test/e2e/storage/regional_pd.go | 10 +-- test/e2e/storage/subpath.go | 4 +- test/e2e/storage/testsuites/base.go | 2 +- test/e2e/storage/testsuites/ephemeral.go | 4 +- test/e2e/storage/testsuites/provisioning.go | 18 ++--- test/e2e/storage/testsuites/snapshottable.go | 4 +- test/e2e/storage/testsuites/subpath.go | 10 +-- test/e2e/storage/testsuites/topology.go | 6 +- test/e2e/storage/testsuites/volume_expand.go | 2 +- test/e2e/storage/testsuites/volume_io.go | 2 +- test/e2e/storage/testsuites/volumelimits.go | 6 +- test/e2e/storage/testsuites/volumemode.go | 16 ++-- test/e2e/storage/utils/create.go | 22 ++--- test/e2e/storage/utils/host_exec.go | 3 +- test/e2e/storage/utils/utils.go | 6 +- test/e2e/storage/volume_metrics.go | 26 +++--- test/e2e/storage/volume_provisioning.go | 26 +++--- test/e2e/storage/volumes.go | 2 +- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 8 +- .../e2e/storage/vsphere/pvc_label_selector.go | 7 +- test/e2e/storage/vsphere/vsphere_scale.go | 2 +- .../storage/vsphere/vsphere_statefulsets.go | 2 +- test/e2e/storage/vsphere/vsphere_stress.go | 8 +- .../vsphere/vsphere_volume_cluster_ds.go | 2 +- .../vsphere/vsphere_volume_datastore.go | 2 +- .../vsphere/vsphere_volume_diskformat.go | 6 +- .../vsphere/vsphere_volume_disksize.go | 2 +- .../storage/vsphere/vsphere_volume_fstype.go | 4 +- .../vsphere/vsphere_volume_master_restart.go | 2 +- .../vsphere/vsphere_volume_node_poweroff.go | 2 +- .../vsphere/vsphere_volume_ops_storm.go | 3 +- .../storage/vsphere/vsphere_volume_perf.go | 9 ++- .../vsphere/vsphere_volume_placement.go | 3 +- .../vsphere/vsphere_volume_vpxd_restart.go | 2 +- .../vsphere/vsphere_volume_vsan_policy.go | 6 +- .../storage/vsphere/vsphere_zone_support.go | 12 +-- test/e2e/upgrades/apps/daemonsets.go | 2 +- test/e2e/upgrades/apps/deployments.go | 2 +- test/e2e/upgrades/apps/replicasets.go | 2 +- 
test/e2e/upgrades/apps/statefulset.go | 4 +- test/e2e/upgrades/configmaps.go | 2 +- test/e2e/upgrades/secrets.go | 2 +- test/e2e/windows/dns.go | 2 +- test/e2e/windows/gmsa_full.go | 6 +- test/e2e_kubeadm/util.go | 2 +- test/e2e_node/device_plugin_test.go | 6 +- test/e2e_node/dynamic_kubelet_config_test.go | 32 ++++---- test/e2e_node/eviction_test.go | 6 +- test/e2e_node/gpu_device_plugin_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 2 +- test/e2e_node/util.go | 4 +- .../apimachinery/watch_restart_test.go | 4 +- .../admissionwebhook/admission_test.go | 10 +-- .../admissionwebhook/broken_webhook_test.go | 10 +-- .../admissionwebhook/client_auth_test.go | 8 +- .../admissionwebhook/load_balance_test.go | 12 +-- .../admissionwebhook/reinvocation_test.go | 14 ++-- .../admissionwebhook/timeout_test.go | 10 +-- test/integration/apiserver/apiserver_test.go | 64 +++++++-------- .../max_json_patch_operations_test.go | 2 +- .../apiserver/max_request_body_bytes_test.go | 2 +- test/integration/apiserver/patch_test.go | 2 +- .../apiserver/podlogs/podlogs_test.go | 10 +-- test/integration/auth/accessreview_test.go | 6 +- test/integration/auth/node_test.go | 46 +++++------ test/integration/auth/rbac_test.go | 12 +-- test/integration/auth/svcaccttoken_test.go | 48 +++++------ test/integration/client/client_test.go | 24 +++--- .../integration/client/dynamic_client_test.go | 4 +- test/integration/configmap/configmap_test.go | 4 +- test/integration/cronjob/cronjob_test.go | 2 +- test/integration/daemonset/daemonset_test.go | 46 +++++------ .../defaulttolerationseconds_test.go | 2 +- .../integration/deployment/deployment_test.go | 32 ++++---- test/integration/deployment/util.go | 2 +- .../integration/disruption/disruption_test.go | 8 +- test/integration/dryrun/dryrun_test.go | 2 +- .../etcd/crd_overlap_storage_test.go | 8 +- .../integration/etcd/etcd_cross_group_test.go | 2 +- .../etcd/etcd_storage_path_test.go | 2 +- test/integration/etcd/server.go | 2 +- test/integration/evictions/evictions_test.go | 12 +-- test/integration/examples/apiserver_test.go | 2 +- test/integration/examples/webhook_test.go | 4 +- test/integration/framework/perf_utils.go | 2 +- .../cluster_scoped_owner_test.go | 8 +- .../garbage_collector_test.go | 50 ++++++------ test/integration/ipamperf/util.go | 2 +- .../integration/kubelet/watch_manager_test.go | 4 +- test/integration/master/audit_dynamic_test.go | 6 +- test/integration/master/audit_test.go | 8 +- test/integration/master/crd_test.go | 8 +- .../integration/master/kube_apiserver_test.go | 2 +- .../master/synthetic_master_test.go | 12 +-- .../master/transformation_testcase.go | 4 +- .../namespace/ns_conditions_test.go | 2 +- .../integration/objectmeta/objectmeta_test.go | 4 +- test/integration/pods/pods_test.go | 18 ++--- test/integration/quota/quota_test.go | 8 +- .../integration/replicaset/replicaset_test.go | 20 ++--- .../replicationcontroller_test.go | 18 ++--- test/integration/scale/scale_test.go | 8 +- test/integration/scheduler/extender_test.go | 4 +- test/integration/scheduler/predicates_test.go | 8 +- test/integration/scheduler/preemption_test.go | 4 +- test/integration/scheduler/priorities_test.go | 4 +- test/integration/scheduler/scheduler_test.go | 12 +-- test/integration/scheduler/taint_test.go | 10 +-- test/integration/scheduler/util.go | 16 ++-- .../scheduler_perf/scheduler_test.go | 4 +- test/integration/secrets/secrets_test.go | 6 +- .../serviceaccount/service_account_test.go | 25 +++--- test/integration/statefulset/util.go | 14 ++-- 
.../storageclasses/storage_classes_test.go | 4 +- .../ttlcontroller/ttlcontroller_test.go | 2 +- test/integration/volume/attach_detach_test.go | 30 +++---- .../volume/persistent_volumes_test.go | 54 ++++++------- .../volumescheduling/volume_binding_test.go | 70 ++++++++-------- test/soak/serve_hostnames/serve_hostnames.go | 6 +- test/utils/create_resources.go | 25 +++--- test/utils/density_utils.go | 4 +- test/utils/deployment.go | 2 +- test/utils/replicaset.go | 4 +- test/utils/runners.go | 8 +- 399 files changed, 1560 insertions(+), 1507 deletions(-) diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index bffeda7fb04..5cdcb9f3259 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -347,7 +347,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client } // Can't use a generic apiclient helper func here as we have to tolerate more than AlreadyExists. - if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService, metav1.CreateOptions{}); err != nil { // Ignore if the Service is invalid with this error message: // Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated @@ -355,7 +355,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client return errors.Wrap(err, "unable to create a new DNS service") } - if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to create/update the DNS service") } } @@ -383,7 +383,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi "Corefile": corefile, "Corefile-backup": corefile, }, - }); err != nil { + }, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update the CoreDNS ConfigMap with backup Corefile") } if err := patchCoreDNSDeployment(client, "Corefile-backup"); err != nil { @@ -405,7 +405,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi "Corefile": updatedCorefile, "Corefile-backup": corefile, }, - }); err != nil { + }, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update the CoreDNS ConfigMap") } fmt.Println("[addons]: Migrating CoreDNS Corefile") @@ -452,7 +452,7 @@ func patchCoreDNSDeployment(client clientset.Interface, coreDNSCorefileName stri } patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"volumes":[{"name": "config-volume", "configMap":{"name": "coredns", "items":[{"key": "%s", "path": "%s"}]}}]}}}}`, coreDNSCorefileName, coreDNSCorefileName) - if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil { + if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}); err != nil { return errors.Wrap(err, "unable to patch the CoreDNS deployment") } return nil diff --git a/cmd/kubeadm/app/phases/addons/dns/dns_test.go b/cmd/kubeadm/app/phases/addons/dns/dns_test.go index 096f38b4f46..54b0093849f 100644 --- 
a/cmd/kubeadm/app/phases/addons/dns/dns_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns_test.go @@ -741,7 +741,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin Data: map[string]string{ "Corefile": corefile, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating ConfigMap: %v", err) } @@ -768,7 +768,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin }, }, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating deployment: %v", err) } diff --git a/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go index 87f12475488..e92d8ef686a 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go +++ b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go @@ -91,7 +91,7 @@ func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Sign }, } - req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR) + req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR, metav1.CreateOptions{}) if err != nil { return nil, nil, errors.Wrap(err, "couldn't create certificate signing request") } diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index f48294b4109..64e57045409 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -157,7 +157,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) // Create the Job, but retry in case it is being currently deleted klog.V(2).Infof("Creating Job %q in the namespace %q", jobName, ns) err := wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { - if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job); err != nil { + if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job, metav1.CreateOptions{}); err != nil { klog.V(2).Infof("Could not create Job %q in the namespace %q, retrying: %v", jobName, ns, err) lastError = err return false, nil diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go index cbf61c8d0e8..fa15260e69a 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency.go @@ -44,12 +44,12 @@ type ConfigMapMutator func(*v1.ConfigMap) error // CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create ConfigMap") } - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update ConfigMap") } } @@ -68,7 +68,7 @@ func CreateOrMutateConfigMap(client clientset.Interface, cm *v1.ConfigMap, mutat Factor: 1.0, Jitter: 0.1, }, func() (bool, error) { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { lastError = err if apierrors.IsAlreadyExists(err) { lastError = MutateConfigMap(client, metav1.ObjectMeta{Namespace: cm.ObjectMeta.Namespace, Name: cm.ObjectMeta.Name}, mutator) @@ -102,7 +102,7 @@ func MutateConfigMap(client clientset.Interface, meta metav1.ObjectMeta, mutator if err = mutator(configMap); err != nil { return errors.Wrap(err, "unable to mutate ConfigMap") } - _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap) + _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) return err }) } @@ -113,7 +113,7 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi if !apierrors.IsNotFound(err) { return nil } - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create ConfigMap") } @@ -124,12 +124,12 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi // CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret); err != nil { + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create secret") } - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret); err != nil { + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update secret") } } @@ -138,7 +138,7 @@ func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { // CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error { - if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa); err != nil { + if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}); err != nil { // Note: We don't run .Update here afterwards as that's probably not required // Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently if !apierrors.IsAlreadyExists(err) { @@ -150,12 +150,12 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco // CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error { - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create deployment") } - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update deployment") } } @@ -168,7 +168,7 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen if !apierrors.IsNotFound(err) { return nil } - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create deployment") } @@ -179,12 +179,12 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen // CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error { - if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create daemonset") } - if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update daemonset") } } @@ -211,12 +211,12 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role); err != nil { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC role") } - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role); err != nil { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update RBAC role") } } @@ -225,12 +225,12 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { // CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error { - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding); err != nil { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC rolebinding") } - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding); err != nil { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update RBAC rolebinding") } } @@ -239,12 +239,12 @@ func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.Rol // CreateOrUpdateClusterRole creates a ClusterRole if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error { - if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole); err != nil { + if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC clusterrole") } - if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole); err != nil { + if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update RBAC clusterrole") } } @@ -253,12 +253,12 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.Clu // CreateOrUpdateClusterRoleBinding creates a ClusterRoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error { - if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding); err != nil { + if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC clusterrolebinding") } - if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding); err != nil { + if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}); err != nil { return errors.Wrap(err, "unable to update RBAC clusterrolebinding") } } @@ -302,7 +302,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1 return false, errors.Wrap(err, "failed to create two way merge patch") } - if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { // TODO also check for timeouts if apierrors.IsConflict(err) { fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go index d91555bba28..13879cf0edd 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go @@ -69,7 +69,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { client := fake.NewSimpleClientset() - _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create node to fake client: %v", err) } @@ -122,7 +122,7 @@ func createClientAndConfigMap(t *testing.T) *fake.Clientset { Data: map[string]string{ "key": "some-value", }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating ConfigMap: %v", err) } diff --git a/cmd/kubeadm/app/util/config/cluster_test.go b/cmd/kubeadm/app/util/config/cluster_test.go index 0c4b6aa1556..f42a85c81b3 100644 --- a/cmd/kubeadm/app/util/config/cluster_test.go +++ b/cmd/kubeadm/app/util/config/cluster_test.go @@ -343,7 +343,7 @@ func TestGetNodeRegistration(t *testing.T) { client := clientsetfake.NewSimpleClientset() if rt.node != nil { - _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node, metav1.CreateOptions{}) if err != nil { t.Errorf("couldn't create Node") return @@ -619,7 +619,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) { client := clientsetfake.NewSimpleClientset() if rt.node != nil { - _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node, metav1.CreateOptions{}) if err != nil { t.Errorf("couldn't create Node") return diff --git a/pkg/client/tests/fake_client_test.go b/pkg/client/tests/fake_client_test.go index 0ae5d87bc14..376a04feedd 100644 --- a/pkg/client/tests/fake_client_test.go +++ b/pkg/client/tests/fake_client_test.go @@ -103,7 +103,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { testPod("nsA", "pod-1"), ) - 
_, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB")) + _, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB"), metav1.CreateOptions{}) if err != nil { t.Fatalf("Namespaces.Create: %s", err) } @@ -116,7 +116,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected %d namespaces to match, got %d", expected, actual) } - _, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1"), metav1.CreateOptions{}) if err != nil { t.Fatalf("Pods.Create nsB/pod-1: %s", err) } @@ -132,17 +132,17 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected to find pod nsB/pod-1t, got %s/%s", podB1.Namespace, podB1.Name) } - _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1"), metav1.CreateOptions{}) if err == nil { t.Fatalf("Expected Pods.Create to fail with already exists error") } - _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1"), metav1.UpdateOptions{}) if err != nil { t.Fatalf("Pods.Update nsA/pod-1: %s", err) } - _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2")) + _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2"), metav1.CreateOptions{}) if err == nil { t.Fatalf("Expected Pods.Create to fail with bad request from namespace mismtach") } @@ -150,7 +150,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected Pods.Create error to provide object and request namespaces, got %q", err) } - _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3")) + _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3"), metav1.UpdateOptions{}) if err == nil { t.Fatalf("Expected Pods.Update nsA/pod-3 to fail with not found error") } diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 27c0d4692d2..59e9ae7ea9a 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -27,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -243,7 +244,7 @@ func (e *Signer) signConfigMap() { } func (e *Signer) updateConfigMap(cm *v1.ConfigMap) { - _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm) + _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { klog.V(3).Infof("Error updating ConfigMap: %v", err) } diff --git a/pkg/controller/certificates/approver/BUILD b/pkg/controller/certificates/approver/BUILD index a05220dad88..1516c2c7498 100644 --- a/pkg/controller/certificates/approver/BUILD +++ b/pkg/controller/certificates/approver/BUILD @@ -30,6 +30,7 @@ go_library( "//pkg/controller/certificates:go_default_library", "//staging/src/k8s.io/api/authorization/v1:go_default_library", "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", ], diff --git a/pkg/controller/certificates/approver/sarapprove.go b/pkg/controller/certificates/approver/sarapprove.go index e159920ab38..413e094bc22 100644 --- a/pkg/controller/certificates/approver/sarapprove.go +++ b/pkg/controller/certificates/approver/sarapprove.go @@ -26,6 +26,7 @@ import ( authorization "k8s.io/api/authorization/v1" capi "k8s.io/api/certificates/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" clientset "k8s.io/client-go/kubernetes" capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" @@ -130,7 +131,7 @@ func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs auth ResourceAttributes: &rattrs, }, } - sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar) + sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) if err != nil { return false, err } diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go index 24c6205fa3c..f6fe90c4691 100644 --- a/pkg/controller/certificates/rootcacertpublisher/publisher.go +++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go @@ -186,7 +186,7 @@ func (c *Publisher) syncNamespace(ns string) error { Data: map[string]string{ "ca.crt": string(c.rootCA), }, - }) + }, metav1.CreateOptions{}) return err case err != nil: return err @@ -202,7 +202,7 @@ func (c *Publisher) syncNamespace(ns string) error { cm.Data = data - _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm) + _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/certificates/signer/BUILD b/pkg/controller/certificates/signer/BUILD index f37a0922277..d86feab5ebd 100644 --- a/pkg/controller/certificates/signer/BUILD +++ b/pkg/controller/certificates/signer/BUILD @@ -36,6 +36,7 @@ go_library( "//pkg/controller/certificates:go_default_library", "//pkg/controller/certificates/authority:go_default_library", "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:go_default_library", "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/pkg/controller/certificates/signer/signer.go b/pkg/controller/certificates/signer/signer.go index 9b7861eccac..27ccf0e553a 100644 --- a/pkg/controller/certificates/signer/signer.go +++ b/pkg/controller/certificates/signer/signer.go @@ -24,6 +24,7 @@ import ( "time" capi "k8s.io/api/certificates/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/server/dynamiccertificates" certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" clientset "k8s.io/client-go/kubernetes" @@ -95,7 +96,7 @@ func (s *signer) handle(csr *capi.CertificateSigningRequest) error { if err != nil { return fmt.Errorf("error auto signing csr: %v", err) } - _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr) + _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, 
metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("error updating signature for csr: %v", err) } diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go index 7e5bc78b5b9..b9d8421c5e0 100644 --- a/pkg/controller/client_builder.go +++ b/pkg/controller/client_builder.go @@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, // Try token review first tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} - if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview); err == nil { + if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}); err == nil { if !tokenResult.Status.Authenticated { klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name) return nil, false, nil diff --git a/pkg/controller/client_builder_dynamic.go b/pkg/controller/client_builder_dynamic.go index b89bfde9b6f..4ece4e0af63 100644 --- a/pkg/controller/client_builder_dynamic.go +++ b/pkg/controller/client_builder_dynamic.go @@ -26,6 +26,7 @@ import ( "golang.org/x/oauth2" v1authenticationapi "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" @@ -179,7 +180,7 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) { Spec: v1authenticationapi.TokenRequestSpec{ ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds), }, - }) + }, metav1.CreateOptions{}) if inErr != nil { klog.Warningf("get token failed: %v", inErr) return false, nil diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 085875cbc01..e69a5318840 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -385,7 +385,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod modify(curNode) } - _, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode) + _, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go index 8765755e1bc..cbf76d18add 100644 --- a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go +++ b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go @@ -127,7 +127,7 @@ func (c *ClusterRoleAggregationController) syncClusterRole(key string) error { for _, rule := range newPolicyRules { clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy()) } - _, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole) + _, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index e05e5b68428..772c2a218f7 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -420,7 +420,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, 
types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -440,7 +440,7 @@ type RealControllerRevisionControl struct { var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{} func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -537,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v } func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -577,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT if len(labels.Set(pod.Labels)) == 0 { return fmt.Errorf("unable to create pods, no labels") } - newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod) + newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { // only send an event if the namespace isn't terminating if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { @@ -1119,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } @@ -1178,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la if err != nil { return fmt.Errorf("failed to create a two-way merge patch: %v", err) } - if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch the node: %v", err) } return nil @@ -1197,13 +1197,13 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam // Create the namespace if we can't verify it exists. // Tolerate errors, since we don't know whether this component has namespace creation permissions. 
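	// Reads are untouched here: Get keeps its existing metav1.GetOptions{} argument.
	// Only the two Create calls below gain the new metav1.CreateOptions{} parameter.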
if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { - if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) } } // Create the service account - sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}) + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // If we're racing to init and someone else already created it, re-fetch return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go index 27e22b49daa..eea61f94a62 100644 --- a/pkg/controller/cronjob/injection.go +++ b/pkg/controller/cronjob/injection.go @@ -45,7 +45,7 @@ type realSJControl struct { var _ sjControlInterface = &realSJControl{} func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj) + return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj, metav1.UpdateOptions{}) } // fakeSJControl is the default implementation of sjControlInterface. @@ -107,15 +107,15 @@ func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) { } func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job) + return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job, metav1.UpdateOptions{}) } func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, subresources...) + return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, metav1.PatchOptions{}, subresources...) } func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job) + return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) } func (r realJobControl) DeleteJob(namespace string, name string) error { diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 320a30fc7b1..3036f842cd0 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -1032,7 +1032,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps. 
toUpdate.Status.NumberAvailable = int32(numberAvailable) toUpdate.Status.NumberUnavailable = int32(numberUnavailable) - if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate); updateErr == nil { + if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}); updateErr == nil { return nil } diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 2a7694771c9..7abe32b744f 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -95,7 +95,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok { toUpdate := history.DeepCopy() toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name - history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate) + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{}) if err != nil { return nil, nil, err } @@ -130,7 +130,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps if cur.Revision < currRevision { toUpdate := cur.DeepCopy() toUpdate.Revision = currRevision - _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate) + _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{}) if err != nil { return nil, nil, err } @@ -220,7 +220,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor toUpdate.Labels = make(map[string]string) } toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] - _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate) + _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{}) if err != nil { return nil, err } @@ -323,7 +323,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* Revision: revision, } - history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history) + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history, metav1.CreateOptions{}) if outerErr := err; errors.IsAlreadyExists(outerErr) { // TODO: Is it okay to get from historyLister? existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) @@ -353,7 +353,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* currDS.Status.CollisionCount = new(int32) } *currDS.Status.CollisionCount++ - _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS) + _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS, metav1.UpdateOptions{}) if updateErr != nil { return nil, updateErr } diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 5cc946f0441..c7dea658102 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -589,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. 
A non-empty selector is required.") if d.Status.ObservedGeneration < d.Generation { d.Status.ObservedGeneration = d.Generation - dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) + dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}) } return nil } diff --git a/pkg/controller/deployment/progress.go b/pkg/controller/deployment/progress.go index 85d3628e3ee..823f96d3b33 100644 --- a/pkg/controller/deployment/progress.go +++ b/pkg/controller/deployment/progress.go @@ -22,6 +22,7 @@ import ( "reflect" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" apps "k8s.io/api/apps/v1" @@ -113,7 +114,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go index bc8ac36ad6e..75dfcf20d67 100644 --- a/pkg/controller/deployment/rollback.go +++ b/pkg/controller/deployment/rollback.go @@ -21,6 +21,7 @@ import ( "fmt" "strconv" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" apps "k8s.io/api/apps/v1" @@ -114,7 +115,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error { klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name) setRollbackTo(d, nil) - _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d) + _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 1e191528820..89889e41a63 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -98,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error } var err error - d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) + d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}) return err } @@ -155,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds - return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy) + return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{}) } // Should use the revision in existingNewRS's annotation, since it set by before @@ -173,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old if needsUpdate { var err error - if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil { + if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil { return nil, err } } @@ -220,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old // hash collisions. 
If there is any other error, we need to report it in the status of // the Deployment. alreadyExists := false - createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS) + createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{}) switch { // We may end up hitting this due to a slow cache or a fast resync of the Deployment. case errors.IsAlreadyExists(err): @@ -252,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old *d.Status.CollisionCount++ // Update the collisionCount for the Deployment and let it requeue by returning the original // error. - _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) + _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}) if dErr == nil { klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount) } @@ -268,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 - _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) + _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}) } dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err @@ -285,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old needsUpdate = true } if needsUpdate { - _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) + _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}) } return createdRS, err } @@ -420,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in rsCopy := rs.DeepCopy() *(rsCopy.Spec.Replicas) = newScale deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) - rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy) + rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{}) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) @@ -478,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index f1526b38c47..ee75edaed90 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -792,6 +792,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb 
*policy.PodDisruptionBudget, func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error { // If this update fails, don't retry it. Allow the failure to get handled & // retried in `processNextWorkItem()`. - _, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb) + _, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb, metav1.UpdateOptions{}) return err } diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index 6c0a9d65e56..8458dcaeb1a 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -1054,14 +1054,14 @@ func TestUpdatePDBStatusRetries(t *testing.T) { // Create a PDB and 3 pods that match it. pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1)) - pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb) + pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PDB: %v", err) } podNames := []string{"moe", "larry", "curly"} for _, name := range podNames { pod, _ := newPod(t, name) - _, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + _, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod: %v", err) } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index e307f722255..881e8f6bda5 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -513,10 +513,10 @@ func (e *EndpointController) syncService(key string) error { klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps) if createEndpoints { // No previous endpoints, create them - _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints, metav1.CreateOptions{}) } else { // Pre-existing - _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints, metav1.UpdateOptions{}) } if err != nil { if createEndpoints && errors.IsForbidden(err) { diff --git a/pkg/controller/endpointslice/endpointslice_controller_test.go b/pkg/controller/endpointslice/endpointslice_controller_test.go index da48dad40c2..9ae3c78fdf5 100644 --- a/pkg/controller/endpointslice/endpointslice_controller_test.go +++ b/pkg/controller/endpointslice/endpointslice_controller_test.go @@ -250,7 +250,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) { if err != nil { t.Fatalf("Expected no error adding EndpointSlice: %v", err) } - _, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice) + _, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) if err != nil { t.Fatalf("Expected no error creating EndpointSlice: %v", err) } @@ -306,7 +306,7 @@ func TestSyncServiceFull(t *testing.T) { }, } esController.serviceStore.Add(service) - _, err := 
esController.client.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) assert.Nil(t, err, "Expected no error creating service") // run through full sync service loop @@ -369,7 +369,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac }, } esController.serviceStore.Add(service) - _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) assert.Nil(t, err, "Expected no error creating service") return service } diff --git a/pkg/controller/endpointslice/reconciler.go b/pkg/controller/endpointslice/reconciler.go index f474908a251..0d2ff70e011 100644 --- a/pkg/controller/endpointslice/reconciler.go +++ b/pkg/controller/endpointslice/reconciler.go @@ -206,7 +206,7 @@ func (r *reconciler) finalize( for _, endpointSlice := range slicesToCreate { addTriggerTimeAnnotation(endpointSlice, triggerTime) - _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice) + _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) if err != nil { // If the namespace is terminating, creates will continue to fail. Simply drop the item. if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { @@ -221,7 +221,7 @@ func (r *reconciler) finalize( for _, endpointSlice := range slicesToUpdate { addTriggerTimeAnnotation(endpointSlice, triggerTime) - _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice) + _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{}) if err != nil { errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)) } else { diff --git a/pkg/controller/endpointslice/reconciler_test.go b/pkg/controller/endpointslice/reconciler_test.go index a5a74bdf1b9..f305ca2478e 100644 --- a/pkg/controller/endpointslice/reconciler_test.go +++ b/pkg/controller/endpointslice/reconciler_test.go @@ -204,7 +204,7 @@ func TestReconcile1EndpointSlice(t *testing.T) { svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace) endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc) - _, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1) + _, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1, metav1.CreateOptions{}) assert.Nil(t, createErr, "Expected no error creating endpoint slice") numActionsBefore := len(client.Actions()) @@ -828,7 +828,7 @@ func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool { func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) { t.Helper() for _, endpointSlice := range endpointSlices { - _, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice) + _, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) if err != nil { t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err) } diff --git 
a/pkg/controller/history/controller_history.go b/pkg/controller/history/controller_history.go index 66dfc440e78..6bac086c2f7 100644 --- a/pkg/controller/history/controller_history.go +++ b/pkg/controller/history/controller_history.go @@ -249,7 +249,7 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision * // Update the revisions name clone.Name = ControllerRevisionName(parent.GetName(), hash) ns := parent.GetNamespace() - created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone) + created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{}) if err != nil { @@ -272,7 +272,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio return nil } clone.Revision = newRevision - updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone) + updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{}) if updateErr == nil { return nil } @@ -328,14 +328,14 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind } // Use strategic merge patch to add an owner reference indicating a controller ref return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(), - types.StrategicMergePatchType, patchBytes) + types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) } func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { // Use strategic merge patch to add an owner reference indicating a controller ref released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(), types.StrategicMergePatchType, - []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID))) + []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)), metav1.PatchOptions{}) if err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controller/history/controller_history_test.go b/pkg/controller/history/controller_history_test.go index 8e5d5e4291b..8b95aac5c57 100644 --- a/pkg/controller/history/controller_history_test.go +++ b/pkg/controller/history/controller_history_test.go @@ -261,7 +261,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) { var collisionCount int32 for _, item := range test.existing { - _, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision) + _, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index 1c5c445aeae..439072a0cb6 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -832,7 +832,7 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error { break } newJob.Status = job.Status - if _, err = jobClient.UpdateStatus(context.TODO(), newJob); err == nil { + if _, err = jobClient.UpdateStatus(context.TODO(), newJob, metav1.UpdateOptions{}); 
err == nil { break } } diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go index 7035a3f9b6f..94cfc757f4e 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go +++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go @@ -269,7 +269,7 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam newNamespace.ObjectMeta = namespace.ObjectMeta newNamespace.Status = *namespace.Status.DeepCopy() newNamespace.Status.Phase = v1.NamespaceTerminating - return d.nsClient.UpdateStatus(context.TODO(), &newNamespace) + return d.nsClient.UpdateStatus(context.TODO(), &newNamespace, metav1.UpdateOptions{}) } // finalized returns true if the namespace.Spec.Finalizers is an empty list @@ -551,7 +551,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64, // we need to reflect that information. Recall that additional finalizers can be set on namespaces, so this finalizer may clear itself and // NOT remove the resource instance. if hasChanged := conditionUpdater.Update(ns); hasChanged { - if _, err = d.nsClient.UpdateStatus(context.TODO(), ns); err != nil { + if _, err = d.nsClient.UpdateStatus(context.TODO(), ns, metav1.UpdateOptions{}); err != nil { utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err)) } } diff --git a/pkg/controller/nodeipam/ipam/adapter.go b/pkg/controller/nodeipam/ipam/adapter.go index 18603dc2fe0..8bb13843f02 100644 --- a/pkg/controller/nodeipam/ipam/adapter.go +++ b/pkg/controller/nodeipam/ipam/adapter.go @@ -103,7 +103,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang return err } - _, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes) + _, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes, metav1.PatchOptions{}) return err } diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 18d916aed1b..4f135963c74 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -1149,7 +1149,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node _, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) { - if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node); err != nil { + if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil { klog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 465d5521f6e..3e2874bcdd9 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -1113,7 +1113,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto } hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler) - _, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1) + _, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1, 
metav1.UpdateOptions{}) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index d7fca40dbe0..b3ddd33b73c 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -346,7 +346,7 @@ func TestGCOrphaned(t *testing.T) { // Execute planned nodes changes for _, node := range test.addedClientNodes { - client.CoreV1().Nodes().Create(context.TODO(), node) + client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) } for _, node := range test.deletedClientNodes { client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}) diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index b35600c3c81..df24f4870ac 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -1159,7 +1159,7 @@ func TestExpectationsOnRecreate(t *testing.T) { } oldRS := newReplicaSet(1, map[string]string{"foo": "bar"}) - oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS) + oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -1240,7 +1240,7 @@ func TestExpectationsOnRecreate(t *testing.T) { newRS := oldRS.DeepCopy() newRS.UID = uuid.NewUUID() - newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS) + newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index f3d0ac11dec..8fd3ea24ee3 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -64,7 +64,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration)) rs.Status = newStatus - updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs) + updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}) if updateErr == nil { return updatedRS, nil } diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 875ff8cba8e..4b1062bad5f 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -356,7 +356,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ // there was a change observed by this controller that requires we update quota if dirty { - _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage) + _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{}) if err != nil { errors = append(errors, err) } diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index 1e5097cd826..ddceb9e498c 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
+ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/pkg/controller/service/controller_test.go b/pkg/controller/service/controller_test.go index 2c2b5023adb..bc6222961ea 100644 --- a/pkg/controller/service/controller_test.go +++ b/pkg/controller/service/controller_test.go @@ -310,7 +310,7 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) { controller, cloud, client := newController() cloud.Exists = tc.lbExists key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name) - if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service); err != nil { + if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to prepare service %s for testing: %v", key, err) } client.ClearActions() @@ -603,7 +603,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) { for _, tc := range testCases { newSvc := tc.updateFn(tc.svc) - if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { + if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err) } obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key) @@ -1222,7 +1222,7 @@ func TestAddFinalizer(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to prepare service for testing: %v", err) } if err := s.addFinalizer(tc.svc); err != nil { @@ -1276,7 +1276,7 @@ func TestRemoveFinalizer(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to prepare service for testing: %v", err) } if err := s.removeFinalizer(tc.svc); err != nil { @@ -1376,7 +1376,7 @@ func TestPatchStatus(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to prepare service for testing: %v", err) } if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil { diff --git a/pkg/controller/service/patch.go b/pkg/controller/service/patch.go index a0ee158aa5a..ceb6221fdfb 100644 --- a/pkg/controller/service/patch.go +++ b/pkg/controller/service/patch.go @@ -22,6 +22,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" v1core "k8s.io/client-go/kubernetes/typed/core/v1" @@ -38,7 +39,7 @@ func patch(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v return nil, err } - return c.Services(oldSvc.Namespace).Patch(context.TODO(), 
oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") } func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) { diff --git a/pkg/controller/service/patch_test.go b/pkg/controller/service/patch_test.go index 95b60941529..a1f8e08c3a9 100644 --- a/pkg/controller/service/patch_test.go +++ b/pkg/controller/service/patch_test.go @@ -45,7 +45,7 @@ func TestPatch(t *testing.T) { // Issue a separate update and verify patch doesn't fail after this. svcToUpdate := svcOrigin.DeepCopy() addAnnotations(svcToUpdate) - if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil { + if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update service: %v", err) } diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index f746a309682..9f4ccdbe7e8 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -213,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { // TODO eliminate this once the fake client can handle creation without NS sa.Namespace = ns.Name - if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { // we can safely ignore terminating namespace errors if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { createFailures = append(createFailures, err) diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 1cf9621472e..896ab4b1660 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -407,7 +407,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou } // Save the secret - createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret) + createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { // if the namespace is being terminated, create will fail no matter what if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { @@ -449,7 +449,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // Try to add a reference to the token liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name}) - if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount); err != nil { + if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount, metav1.UpdateOptions{}); err != nil { return err } @@ -567,7 +567,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID) // Save the secret - _, err = secrets.Update(context.TODO(), liveSecret) + _, err = secrets.Update(context.TODO(), liveSecret, metav1.UpdateOptions{}) if apierrors.IsConflict(err) || 
apierrors.IsNotFound(err) { // if we got a Conflict error, the secret was updated by someone else, and we'll get an update notification later // if we got a NotFound error, the secret no longer exists, and we don't need to populate a token @@ -611,7 +611,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri } } serviceAccount.Secrets = secrets - _, err = serviceAccounts.Update(context.TODO(), serviceAccount) + _, err = serviceAccounts.Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) // Ignore NotFound errors when attempting to remove a reference if apierrors.IsNotFound(err) { return nil diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index 5ec41d4b9b3..824671ee6a8 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -24,6 +24,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" errorutils "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientset "k8s.io/client-go/kubernetes" @@ -78,7 +79,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod return err } // If we created the PVCs attempt to create the Pod - _, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod) + _, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) // sink already exists errors if apierrors.IsAlreadyExists(err) { return err @@ -114,7 +115,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod attemptedUpdate = true // commit the update, retrying on conflicts - _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod) + _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) if updateErr == nil { return nil } @@ -183,7 +184,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef _, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) switch { case apierrors.IsNotFound(err): - _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim) + _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim, metav1.CreateOptions{}) if err != nil { errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err)) } diff --git a/pkg/controller/statefulset/stateful_set_status_updater.go b/pkg/controller/statefulset/stateful_set_status_updater.go index f7f99e77a53..52c79c28b63 100644 --- a/pkg/controller/statefulset/stateful_set_status_updater.go +++ b/pkg/controller/statefulset/stateful_set_status_updater.go @@ -21,6 +21,7 @@ import ( "fmt" apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientset "k8s.io/client-go/kubernetes" appslisters "k8s.io/client-go/listers/apps/v1" @@ -54,7 +55,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus( // don't wait due to limited number of clients, but backoff after the default number of steps return retry.RetryOnConflict(retry.DefaultRetry, func() error { set.Status = *status - _, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set) + _, updateErr := 
ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{}) if updateErr == nil { return nil } diff --git a/pkg/controller/ttl/BUILD b/pkg/controller/ttl/BUILD index 3eff1a2ab62..1f69b0ae43b 100644 --- a/pkg/controller/ttl/BUILD +++ b/pkg/controller/ttl/BUILD @@ -14,6 +14,7 @@ go_library( "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go index 58bc3e1c194..2f5fd269f84 100644 --- a/pkg/controller/ttl/ttl_controller.go +++ b/pkg/controller/ttl/ttl_controller.go @@ -36,6 +36,7 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -264,7 +265,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey if err != nil { return err } - _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes) + _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) return err diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index 97282b79db2..ffcdd654dac 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -110,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa var updatedPod *v1.Pod var err error - if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod); err != nil { + if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { return nil, err } return updatedPod, nil @@ -137,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s break } klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) - _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) + _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. 
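Every hunk in this patch follows the same mechanical pattern: generated Create, Update, UpdateStatus and Patch calls gain a trailing options argument (metav1.CreateOptions, metav1.UpdateOptions or metav1.PatchOptions). As a minimal sketch of the resulting calling convention — assuming the updated client-go signatures introduced by this series, and using a fake clientset plus a made-up pod purely for illustration, not code from this patch:

	package main

	import (
		"context"
		"fmt"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes/fake"
	)

	func main() {
		// Illustrative fake clientset and pod; the names are hypothetical.
		client := fake.NewSimpleClientset()
		pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

		// Create now takes an explicit metav1.CreateOptions value.
		if _, err := client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
			fmt.Println("create:", err)
		}

		// Status updates take metav1.UpdateOptions, mirroring the UpdateStatus changes above.
		pod.Status.Phase = v1.PodRunning
		if _, err := client.CoreV1().Pods("default").UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
			fmt.Println("update status:", err)
		}
	}

Patch calls pick up metav1.PatchOptions{} in the same position, ahead of any variadic subresource arguments (as in the realJobControl.PatchJob and service status patch changes above).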
diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index 22236354f35..d7c59fccf5f 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -270,7 +270,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods1 { // Add a new pod between ASW and DSW ppoulators - _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } @@ -287,7 +287,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods2 { // Add a new pod between DSW ppoulator and reconciler run - _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } diff --git a/pkg/controller/volume/expand/cache/volume_resize_map.go b/pkg/controller/volume/expand/cache/volume_resize_map.go index 7e2a2342884..10ab38346c7 100644 --- a/pkg/controller/volume/expand/cache/volume_resize_map.go +++ b/pkg/controller/volume/expand/cache/volume_resize_map.go @@ -203,7 +203,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi return fmt.Errorf("Error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err) } - _, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes) + _, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if updateErr != nil { klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr) diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index d8ab9396e0d..fe2ebc5a31b 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -754,7 +754,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo return claim, nil } - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err @@ -810,7 +810,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV volumeClone.Status.Phase = phase volumeClone.Status.Message = message - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone, metav1.UpdateOptions{}) if err != nil { 
klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err @@ -872,7 +872,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) { claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef) klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name) - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err) return newVol, err @@ -924,7 +924,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo if dirty { klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err @@ -1011,7 +1011,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume volumeClone.Spec.ClaimRef.UID = "" } - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) return err @@ -1515,7 +1515,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume); err == nil || apierrors.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{}); err == nil || apierrors.IsAlreadyExists(err) { // Save succeeded. 
if err != nil { klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) @@ -1631,7 +1631,7 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist newClaim := claim.DeepCopy() delete(newClaim.Annotations, pvutil.AnnSelectedNode) // Try to update the PVC object - if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim); err != nil { + if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim, metav1.UpdateOptions{}); err != nil { klog.V(4).Infof("Failed to delete annotation 'pvutil.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) return } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 44bd8aca2af..12e18d70ad0 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -322,7 +322,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v if !modified { return claimClone, nil } - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } @@ -339,7 +339,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume if !modified { return volumeClone, nil } - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{}) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } @@ -546,7 +546,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName) updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, pvutil.AnnStorageProvisioner) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) if err != nil { return newClaim, err } diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 42e33544ddb..19b3740b003 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -189,7 +189,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { } claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) - _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) if 
err != nil { klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err @@ -201,7 +201,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) - _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) if err != nil { klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err diff --git a/pkg/controller/volume/pvprotection/BUILD b/pkg/controller/volume/pvprotection/BUILD index 9137ea5e901..81e1ae8e68c 100644 --- a/pkg/controller/volume/pvprotection/BUILD +++ b/pkg/controller/volume/pvprotection/BUILD @@ -11,6 +11,7 @@ go_library( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index 6b1844f502c..62fad1ae198 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" coreinformers "k8s.io/client-go/informers/core/v1" @@ -162,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error { } pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) - _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone) + _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{}) if err != nil { klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) return err @@ -174,7 +175,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error { func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error { pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil) - _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone) + _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{}) if err != nil { klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) return err diff --git a/pkg/controller/volume/scheduling/scheduler_binder.go b/pkg/controller/volume/scheduling/scheduler_binder.go index 6a57f272254..564e26eef37 100644 --- a/pkg/controller/volume/scheduling/scheduler_binder.go +++ b/pkg/controller/volume/scheduling/scheduler_binder.go @@ -424,7 +424,7 @@ func 
(b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // TODO: does it hurt if we make an api call and nothing needs to be updated? claimKey := claimToClaimKey(binding.pvc) klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name) - newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv) + newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err) return err @@ -439,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // PV controller is expect to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) - newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim) + newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go index 2b672416981..000d149bb12 100644 --- a/pkg/controller/volume/scheduling/scheduler_binder_test.go +++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go @@ -295,7 +295,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, waitCache bool) { for _, pv := range pvs { - if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv); err != nil { + if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { t.Fatalf("failed to update PV %q", pv.Name) } } @@ -321,7 +321,7 @@ func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, wait func (env *testEnv) updateClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim, waitCache bool) { for _, pvc := range pvcs { - if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc); err != nil { + if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil { t.Fatalf("failed to update PVC %q", getPVCName(pvc)) } } @@ -1769,7 +1769,7 @@ func TestBindPodVolumes(t *testing.T) { newPVC := pvc.DeepCopy() newPVC.Spec.VolumeName = pv.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1786,14 +1786,14 @@ func TestBindPodVolumes(t *testing.T) { return } dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass) - dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV) + dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV, metav1.CreateOptions{}) if err != nil { t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err) return } newPVC.Spec.VolumeName = 
dynamicPV.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1869,7 +1869,7 @@ func TestBindPodVolumes(t *testing.T) { newPVC := pvcs[0].DeepCopy() newPVC.Spec.VolumeName = pvNode2.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1904,13 +1904,13 @@ func TestBindPodVolumes(t *testing.T) { // Before Execute if scenario.apiPV != nil { - _, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV) + _, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV, metav1.UpdateOptions{}) if err != nil { t.Fatalf("failed to update PV %q", scenario.apiPV.Name) } } if scenario.apiPVC != nil { - _, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC) + _, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC, metav1.UpdateOptions{}) if err != nil { t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC)) } diff --git a/pkg/kubectl/cmd/auth/BUILD b/pkg/kubectl/cmd/auth/BUILD index a036a22f18b..828bbbfc4dd 100644 --- a/pkg/kubectl/cmd/auth/BUILD +++ b/pkg/kubectl/cmd/auth/BUILD @@ -22,6 +22,7 @@ go_library( "//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library", "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/pkg/kubectl/cmd/auth/cani.go b/pkg/kubectl/cmd/auth/cani.go index 06f48e7c7cd..5d55ddf775f 100644 --- a/pkg/kubectl/cmd/auth/cani.go +++ b/pkg/kubectl/cmd/auth/cani.go @@ -31,6 +31,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/cli-runtime/pkg/genericclioptions" @@ -223,7 +224,7 @@ func (o *CanIOptions) RunAccessList() error { Namespace: o.Namespace, }, } - response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar) + response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) if err != nil { return err } @@ -258,7 +259,7 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) { } } - response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar) + response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) if err != 
nil { return false, err } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index b72e3a55e65..28bf6a054ba 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -83,7 +83,7 @@ func (kl *Kubelet) registerWithAPIServer() { // value of the annotation for controller-managed attach-detach of attachable // persistent volumes for the node. func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { - _, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node) + _, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) if err == nil { return true } diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index dc0b1c2bf6c..491d207371d 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -2264,7 +2264,7 @@ func TestUpdateNodeAddresses(t *testing.T) { }, } - _, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode) + _, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{}) assert.NoError(t, err) kubelet.setNodeStatusFuncs = []func(*v1.Node) error{ func(node *v1.Node) error { diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index 7e28c7e0273..b2de60c7c07 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -199,7 +199,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc // because the event recorder won't flush its queue before we exit (we'd lose the event) event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) - if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event); err != nil { + if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event, metav1.CreateOptions{}); err != nil { utillog.Errorf("failed to send event, error: %v", err) } utillog.Infof(message) diff --git a/pkg/kubelet/nodelease/controller.go b/pkg/kubelet/nodelease/controller.go index ce1594c74d4..4641a06b7fd 100644 --- a/pkg/kubelet/nodelease/controller.go +++ b/pkg/kubelet/nodelease/controller.go @@ -153,7 +153,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // not create it this time - we will retry in the next iteration. 
return nil, false, nil } - lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate) + lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate, metav1.CreateOptions{}) if err != nil { return nil, false, err } @@ -170,7 +170,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // call this once you're sure the lease has been created func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error { for i := 0; i < maxUpdateRetries; i++ { - lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base)) + lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base), metav1.UpdateOptions{}) if err == nil { c.latestLease = lease return nil diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go index 68487cf5800..551d27df444 100644 --- a/pkg/kubelet/pod/mirror_client.go +++ b/pkg/kubelet/pod/mirror_client.go @@ -96,7 +96,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { Controller: &controller, }} - apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), ©Pod) + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), ©Pod, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { diff --git a/pkg/kubelet/token/BUILD b/pkg/kubelet/token/BUILD index 3980947498d..6db84d1254a 100644 --- a/pkg/kubelet/token/BUILD +++ b/pkg/kubelet/token/BUILD @@ -22,6 +22,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/pkg/kubelet/token/token_manager.go b/pkg/kubelet/token/token_manager.go index f14d5fe43cc..72c067f74ac 100644 --- a/pkg/kubelet/token/token_manager.go +++ b/pkg/kubelet/token/token_manager.go @@ -27,6 +27,7 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" @@ -65,7 +66,7 @@ func NewManager(c clientset.Interface) *Manager { if c == nil { return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode") } - tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr) + tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{}) if apierrors.IsNotFound(err) && !tokenRequestsSupported() { return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled") } diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 1388fb9d77c..6672690cb65 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -430,7 +430,7 @@ func delayClaimBecomesBound( volumeClaim.Status = v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, } - kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), 
volumeClaim) + kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), volumeClaim, metav1.UpdateOptions{}) } func runVolumeManager(manager VolumeManager) chan struct{} { diff --git a/pkg/kubemark/controller.go b/pkg/kubemark/controller.go index 38f16e23d63..8068ef09ac5 100644 --- a/pkg/kubemark/controller.go +++ b/pkg/kubemark/controller.go @@ -227,7 +227,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin var err error for i := 0; i < numRetries; i++ { - _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node) + _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node, metav1.CreateOptions{}) if err == nil { return nil } diff --git a/pkg/master/client_util.go b/pkg/master/client_util.go index afefa6a9ec5..689e2cc3352 100644 --- a/pkg/master/client_util.go +++ b/pkg/master/client_util.go @@ -35,7 +35,7 @@ func createNamespaceIfNeeded(c corev1client.NamespacesGetter, ns string) error { Namespace: "", }, } - _, err := c.Namespaces().Create(context.TODO(), newNs) + _, err := c.Namespaces().Create(context.TODO(), newNs, metav1.CreateOptions{}) if err != nil && errors.IsAlreadyExists(err) { err = nil } diff --git a/pkg/master/controller.go b/pkg/master/controller.go index 76ef9fabb0a..3a539451ef5 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -286,7 +286,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser if reconcile { if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { klog.Warningf("Resetting master service %q to %#v", serviceName, svc) - _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(context.TODO(), svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) return err } } @@ -308,7 +308,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser }, } - _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(context.TODO(), svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile) } diff --git a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go index abeb02bb50c..c017f6b2f19 100644 --- a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go +++ b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go @@ -185,7 +185,7 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string) Namespace: "", }, } - _, err := nsClient.Namespaces().Create(context.TODO(), newNs) + _, err := nsClient.Namespaces().Create(context.TODO(), newNs, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { err = nil } @@ -193,9 +193,9 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string) } func writeConfigMap(configMapClient corev1client.ConfigMapsGetter, required *corev1.ConfigMap) error { - _, err := configMapClient.ConfigMaps(required.Namespace).Update(context.TODO(), required) + _, err := 
configMapClient.ConfigMaps(required.Namespace).Update(context.TODO(), required, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) { - _, err := configMapClient.ConfigMaps(required.Namespace).Create(context.TODO(), required) + _, err := configMapClient.ConfigMaps(required.Namespace).Create(context.TODO(), required, metav1.CreateOptions{}) return err } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index e26f2376bc3..05b64fbd688 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -262,7 +262,7 @@ func TestGetNodeAddresses(t *testing.T) { nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{}) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} - fakeNodeClient.Update(context.TODO(), &nodes.Items[index]) + fakeNodeClient.Update(context.TODO(), &nodes.Items[index], metav1.UpdateOptions{}) } addrs, err = addressProvider.externalAddresses() assert.NoError(err, "addresses should not have returned an error.") @@ -278,7 +278,7 @@ func TestGetNodeAddressesWithOnlySomeExternalIP(t *testing.T) { // Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP. nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{}) nodes.Items[1].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} - fakeNodeClient.Update(context.TODO(), &nodes.Items[1]) + fakeNodeClient.Update(context.TODO(), &nodes.Items[1], metav1.UpdateOptions{}) addrs, err := addressProvider.externalAddresses() assert.NoError(err, "addresses should not have returned an error.") diff --git a/pkg/master/reconcilers/endpointsadapter.go b/pkg/master/reconcilers/endpointsadapter.go index 4dd38bdf678..eb7b8358d77 100644 --- a/pkg/master/reconcilers/endpointsadapter.go +++ b/pkg/master/reconcilers/endpointsadapter.go @@ -60,7 +60,7 @@ func (adapter *EndpointsAdapter) Get(namespace, name string, getOpts metav1.GetO // be created or updated. The created Endpoints object or an error will be // returned. func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) { - endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(context.TODO(), endpoints) + endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err == nil { err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints) } @@ -71,7 +71,7 @@ func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endp // endpointSliceClient exists, a matching EndpointSlice will also be created or // updated. The updated Endpoints object or an error will be returned. 
func (adapter *EndpointsAdapter) Update(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) { - endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(context.TODO(), endpoints) + endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(context.TODO(), endpoints, metav1.UpdateOptions{}) if err == nil { err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints) } @@ -90,7 +90,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri if err != nil { if errors.IsNotFound(err) { - if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice); errors.IsAlreadyExists(err) { + if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}); errors.IsAlreadyExists(err) { err = nil } } @@ -103,7 +103,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri if err != nil { return err } - _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice) + _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) return err } @@ -113,7 +113,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri return nil } - _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(context.TODO(), endpointSlice) + _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{}) return err } diff --git a/pkg/master/reconcilers/endpointsadapter_test.go b/pkg/master/reconcilers/endpointsadapter_test.go index c888da1e7db..d8febe69a3e 100644 --- a/pkg/master/reconcilers/endpointsadapter_test.go +++ b/pkg/master/reconcilers/endpointsadapter_test.go @@ -84,7 +84,7 @@ func TestEndpointsAdapterGet(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -182,7 +182,7 @@ func TestEndpointsAdapterCreate(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -293,7 +293,7 @@ func TestEndpointsAdapterUpdate(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -435,7 +435,7 @@ func TestEndpointsAdapterEnsureEndpointSliceFromEndpoints(t *testing.T) { } for _, endpointSlice := range testCase.endpointSlices { - _, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice) + _, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating EndpointSlice: %v", err) } diff --git 
a/pkg/master/reconcilers/lease_test.go b/pkg/master/reconcilers/lease_test.go index 2160816c56f..c1f4d493f67 100644 --- a/pkg/master/reconcilers/lease_test.go +++ b/pkg/master/reconcilers/lease_test.go @@ -421,7 +421,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { clientset := fake.NewSimpleClientset() if test.endpoints != nil { for _, ep := range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } @@ -523,7 +523,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { clientset := fake.NewSimpleClientset() if test.endpoints != nil { for _, ep := range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } @@ -638,7 +638,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) { fakeLeases.SetKeys(test.endpointKeys) clientset := fake.NewSimpleClientset() for _, ep := range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } diff --git a/pkg/registry/core/pod/storage/eviction.go b/pkg/registry/core/pod/storage/eviction.go index 6f69390f6eb..d396cc7ef22 100644 --- a/pkg/registry/core/pod/storage/eviction.go +++ b/pkg/registry/core/pod/storage/eviction.go @@ -241,7 +241,7 @@ func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb p // If the pod is not deleted within a reasonable time limit PDB controller will assume that it won't // be deleted at all and remove it from DisruptedPod map. 
pdb.Status.DisruptedPods[podName] = metav1.Time{Time: time.Now()} - if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(context.TODO(), &pdb); err != nil { + if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(context.TODO(), &pdb, metav1.UpdateOptions{}); err != nil { return err } diff --git a/pkg/registry/flowcontrol/rest/BUILD b/pkg/registry/flowcontrol/rest/BUILD index 8d6000ceafa..34735acf467 100644 --- a/pkg/registry/flowcontrol/rest/BUILD +++ b/pkg/registry/flowcontrol/rest/BUILD @@ -47,6 +47,7 @@ go_test( deps = [ "//pkg/apis/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/registry/flowcontrol/rest/storage_flowcontrol.go b/pkg/registry/flowcontrol/rest/storage_flowcontrol.go index 965327a32b8..13790cfb681 100644 --- a/pkg/registry/flowcontrol/rest/storage_flowcontrol.go +++ b/pkg/registry/flowcontrol/rest/storage_flowcontrol.go @@ -154,7 +154,7 @@ func lastMandatoryExists(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alp func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, flowSchemas []*flowcontrolv1alpha1.FlowSchema, priorityLevels []*flowcontrolv1alpha1.PriorityLevelConfiguration) error { for _, flowSchema := range flowSchemas { - _, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema) + _, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset FlowSchema %s already exists, skipping creating", flowSchema.Name) continue @@ -165,7 +165,7 @@ func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, klog.V(3).Infof("created system preset FlowSchema %s", flowSchema.Name) } for _, priorityLevelConfiguration := range priorityLevels { - _, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration) + _, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", priorityLevelConfiguration.Name) continue @@ -189,7 +189,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface return fmt.Errorf("failed checking if mandatory FlowSchema %s is up-to-date due to %v, will retry later", expectedFlowSchema.Name, err) } if !identical { - if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema); err != nil { + if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed upgrading mandatory FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) } } @@ -198,7 +198,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface if !apierrors.IsNotFound(err) { return fmt.Errorf("failed getting FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) } - _, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), 
expectedFlowSchema) + _, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), expectedFlowSchema, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset FlowSchema %s already exists, skipping creating", expectedFlowSchema.Name) continue @@ -218,7 +218,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface return fmt.Errorf("failed checking if mandatory PriorityLevelConfiguration %s is up-to-date due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } if !identical { - if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration); err != nil { + if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed upgrading mandatory PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } } @@ -227,7 +227,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface if !apierrors.IsNotFound(err) { return fmt.Errorf("failed getting PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } - _, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration) + _, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", expectedPriorityLevelConfiguration.Name) continue diff --git a/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go b/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go index 1c344bb5b9b..d14afc4c693 100644 --- a/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go +++ b/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" "k8s.io/client-go/kubernetes/fake" flowcontrolapisv1alpha1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1alpha1" @@ -50,7 +51,7 @@ func TestShouldEnsurePredefinedSettings(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { c := fake.NewSimpleClientset() if testCase.existingPriorityLevel != nil { - c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(context.TODO(), testCase.existingPriorityLevel) + c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(context.TODO(), testCase.existingPriorityLevel, metav1.CreateOptions{}) } should, err := lastMandatoryExists(c.FlowcontrolV1alpha1()) assert.NoError(t, err) diff --git a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go index cb90ca0d82a..4a4527581d3 100644 --- a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go @@ -88,7 +88,7 @@ func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) { } func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole) + ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, 
metav1.CreateOptions{}) if err != nil { return nil, err } @@ -96,7 +96,7 @@ func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) { } func (c ClusterRoleModifier) Update(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole) + ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, metav1.UpdateOptions{}) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go index 0261ec2d92d..6b136bba4f7 100644 --- a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go @@ -89,7 +89,7 @@ func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBindin } func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding) + ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -97,7 +97,7 @@ func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, er } func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding) + ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.UpdateOptions{}) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/namespace.go b/pkg/registry/rbac/reconciliation/namespace.go index 75d161a95d0..584a6252263 100644 --- a/pkg/registry/rbac/reconciliation/namespace.go +++ b/pkg/registry/rbac/reconciliation/namespace.go @@ -39,7 +39,7 @@ func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string } ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - _, createErr := client.Create(context.TODO(), ns) + _, createErr := client.Create(context.TODO(), ns, metav1.CreateOptions{}) return utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden) } diff --git a/pkg/registry/rbac/reconciliation/role_interfaces.go b/pkg/registry/rbac/reconciliation/role_interfaces.go index 30819f3b573..31bb6566de1 100644 --- a/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -93,7 +93,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { return nil, err } - ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role) + ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { } func (c RoleModifier) Update(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role) + ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role, metav1.UpdateOptions{}) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index ed55cbe668d..70d47921caf 100644 --- 
a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -95,7 +95,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { return nil, err } - ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding) + ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -103,7 +103,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { } func (c RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding) + ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.UpdateOptions{}) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go index 3dd1b4794c2..1d29de6b30b 100644 --- a/pkg/registry/rbac/rest/storage_rbac.go +++ b/pkg/registry/rbac/rest/storage_rbac.go @@ -360,7 +360,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus klog.V(1).Infof("migrating %v to %v", existingRole.Name, newName) existingRole.Name = newName existingRole.ResourceVersion = "" // clear this so the object can be created. - if _, err := clusterRoleClient.ClusterRoles().Create(context.TODO(), existingRole); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := clusterRoleClient.ClusterRoles().Create(context.TODO(), existingRole, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { return err } } @@ -398,7 +398,7 @@ func primeSplitClusterRoleBindings(clusterRoleBindingToSplit map[string]rbacapiv newCRB.Subjects = existingRoleBinding.Subjects newCRB.Labels = existingRoleBinding.Labels newCRB.Annotations = existingRoleBinding.Annotations - if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(context.TODO(), newCRB); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(context.TODO(), newCRB, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { return err } } diff --git a/pkg/registry/scheduling/rest/storage_scheduling.go b/pkg/registry/scheduling/rest/storage_scheduling.go index 966644b6957..bb476dd3216 100644 --- a/pkg/registry/scheduling/rest/storage_scheduling.go +++ b/pkg/registry/scheduling/rest/storage_scheduling.go @@ -127,7 +127,7 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc { _, err := schedClientSet.PriorityClasses().Get(context.TODO(), pc.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { - _, err := schedClientSet.PriorityClasses().Create(context.TODO(), pc) + _, err := schedClientSet.PriorityClasses().Create(context.TODO(), pc, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { return false, err } else { diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index a298df07e65..0d18894f4a1 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -752,7 +752,7 @@ type podConditionUpdaterImpl struct { func (p *podConditionUpdaterImpl) update(pod *v1.Pod, condition *v1.PodCondition) error { klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, 
condition.Status, condition.Reason) if podutil.UpdatePodCondition(&pod.Status, condition) { - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) return err } return nil @@ -773,7 +773,7 @@ func (p *podPreemptorImpl) deletePod(pod *v1.Pod) error { func (p *podPreemptorImpl) setNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error { podCopy := pod.DeepCopy() podCopy.Status.NominatedNodeName = nominatedNodeName - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), podCopy) + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), podCopy, metav1.UpdateOptions{}) return err } diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 1d6b0a6e657..4830f52abf1 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -227,7 +227,7 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } - if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -248,7 +248,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) - if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -261,7 +261,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode return nil, nil, err } - updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, "status") + updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) } diff --git a/pkg/util/pod/pod.go b/pkg/util/pod/pod.go index 844eec83908..d61cb523743 100644 --- a/pkg/util/pod/pod.go +++ b/pkg/util/pod/pod.go @@ -35,7 +35,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, uid types.UID return nil, nil, err } - updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, "status") + updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err) } diff --git a/pkg/util/pod/pod_test.go b/pkg/util/pod/pod_test.go index e8937cf727c..09619227e64 100644 --- a/pkg/util/pod/pod_test.go +++ b/pkg/util/pod/pod_test.go @@ -39,7 +39,7 @@ func TestPatchPodStatus(t *testing.T) { Namespace: ns, Name: name, }, - }) + }, metav1.CreateOptions{}) testCases := []struct { description string diff --git 
a/pkg/volume/azure_file/azure_util.go b/pkg/volume/azure_file/azure_util.go index 8117c8cdc6f..eab56cb563b 100644 --- a/pkg/volume/azure_file/azure_util.go +++ b/pkg/volume/azure_file/azure_util.go @@ -90,7 +90,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun }, Type: "Opaque", } - _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(context.TODO(), secret) + _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(context.TODO(), secret, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { err = nil } diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index fc6f94fafa1..e3ba528a14d 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -32,6 +32,7 @@ import ( storage "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -105,7 +106,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string }, } - _, err = c.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = c.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) alreadyExist := false if err != nil { if !apierrors.IsAlreadyExists(err) { diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index c5be40cda13..7cb15a929eb 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -102,7 +102,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R } else { attach.Status = status t.Logf("updating attachment %s with attach status %v", attachID, status) - _, err := client.StorageV1().VolumeAttachments().Update(context.TODO(), attach) + _, err := client.StorageV1().VolumeAttachments().Update(context.TODO(), attach, metav1.UpdateOptions{}) if err != nil { t.Error(err) } @@ -530,7 +530,7 @@ func TestAttacherWaitForAttach(t *testing.T) { if test.makeAttachment != nil { attachment := test.makeAttachment() - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create VolumeAttachment: %v", err) } @@ -612,7 +612,7 @@ func TestAttacherWaitForAttachWithInline(t *testing.T) { if test.makeAttachment != nil { attachment := test.makeAttachment() - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create VolumeAttachment: %v", err) } @@ -703,7 +703,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) { attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = tc.initAttached attachment.Status.AttachError = tc.initAttachErr - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -801,7 +801,7 @@ func TestAttacherVolumesAreAttached(t *testing.T) { attachID := getAttachmentName(attachedSpec.volName, testDriver, nodeName) attachment := 
makeTestAttachment(attachID, nodeName, attachedSpec.spec.Name()) attachment.Status.Attached = attachedSpec.attached - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -872,7 +872,7 @@ func TestAttacherVolumesAreAttachedWithInline(t *testing.T) { attachID := getAttachmentName(attachedSpec.volName, testDriver, nodeName) attachment := makeTestAttachment(attachID, nodeName, attachedSpec.spec.Name()) attachment.Status.Attached = attachedSpec.attached - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -960,7 +960,7 @@ func TestAttacherDetach(t *testing.T) { pv := makeTestPV("test-pv", 10, testDriver, tc.volID) spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly) attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv") - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -1186,7 +1186,7 @@ func TestAttacherMountDevice(t *testing.T) { if tc.createAttachment { // Set up volume attachment attachment := makeTestAttachment(attachID, nodeName, pvName) - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -1336,7 +1336,7 @@ func TestAttacherMountDeviceWithInline(t *testing.T) { // Set up volume attachment attachment := makeTestAttachment(attachID, nodeName, pvName) - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -1480,7 +1480,7 @@ func TestAttacherUnmountDevice(t *testing.T) { // Make the PV for this object pvName := filepath.Base(dir) pv := makeTestPV(pvName, 5, "csi", tc.volID) - _, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + _, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil && !tc.shouldFail { t.Fatalf("Failed to create PV: %v", err) } diff --git a/pkg/volume/csi/csi_block_test.go b/pkg/volume/csi/csi_block_test.go index 11fc94e4090..0823e9d2107 100644 --- a/pkg/volume/csi/csi_block_test.go +++ b/pkg/volume/csi/csi_block_test.go @@ -26,6 +26,7 @@ import ( api "k8s.io/api/core/v1" "k8s.io/api/storage/v1beta1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" fakeclient "k8s.io/client-go/kubernetes/fake" featuregatetesting "k8s.io/component-base/featuregate/testing" @@ -258,7 +259,7 @@ func TestBlockMapperSetupDevice(t *testing.T) { attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, 
nodeName, pvName) attachment.Status.Attached = true - _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -300,7 +301,7 @@ func TestBlockMapperMapPodDevice(t *testing.T) { attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = true - _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -342,7 +343,7 @@ func TestBlockMapperMapPodDeviceNotSupportAttach(t *testing.T) { AttachRequired: &attachRequired, }, } - _, err := fakeClient.StorageV1beta1().CSIDrivers().Create(context.TODO(), fakeDriver) + _, err := fakeClient.StorageV1beta1().CSIDrivers().Create(context.TODO(), fakeDriver, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create a fakeDriver: %v", err) } diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index 84f8ca65d00..70c1d492452 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -31,6 +31,7 @@ import ( storage "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" fakeclient "k8s.io/client-go/kubernetes/fake" @@ -213,7 +214,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) { DetachError: nil, }, } - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -356,7 +357,7 @@ func TestMounterSetUpSimple(t *testing.T) { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -484,7 +485,7 @@ func TestMounterSetupWithStatusTracking(t *testing.T) { if tc.createAttachment { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -598,7 +599,7 @@ func TestMounterSetUpWithInline(t *testing.T) { if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecyclePersistent { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), 
string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -744,7 +745,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", pvName) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) if err != nil { t.Errorf("failed to setup VolumeAttachment: %v", err) continue diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 6a243550402..98a16be6e8a 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -425,7 +425,7 @@ func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient cli annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo) if annotationModified { - _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err } return nil @@ -468,7 +468,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { setMigrationAnnotation(nim.migratedPlugins, nodeInfo) - return csiKubeClient.StorageV1().CSINodes().Create(context.TODO(), nodeInfo) + return csiKubeClient.StorageV1().CSINodes().Create(context.TODO(), nodeInfo, metav1.CreateOptions{}) } func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1.CSINode) (modified bool) { @@ -571,7 +571,7 @@ func (nim *nodeInfoManager) installDriverToCSINode( newDriverSpecs = append(newDriverSpecs, driverSpec) nodeInfo.Spec.Drivers = newDriverSpecs - _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err } @@ -628,7 +628,7 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode( } nodeInfo.Spec.Drivers = drivers - _, err = nodeInfoClient.Update(context.TODO(), nodeInfo) + _, err = nodeInfoClient.Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err // do not wrap error diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 6b92663922d..6a5bb15465e 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -833,7 +833,7 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi endpoint.Subsets = subset endpoint.Subsets[0].Addresses = addrlist endpoint.Subsets[0].Ports = ports - _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(context.TODO(), endpoint) + _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(context.TODO(), endpoint, metav1.UpdateOptions{}) if err != nil { deleteErr := cli.VolumeDelete(volume.Id) if deleteErr != nil { @@ -884,7 +884,7 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string if kubeClient == nil { return nil, nil, fmt.Errorf("failed to 
get kube client when creating endpoint service") } - _, err = kubeClient.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoint) + _, err = kubeClient.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoint, metav1.CreateOptions{}) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) err = nil @@ -904,7 +904,7 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err = kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) err = nil diff --git a/pkg/volume/storageos/storageos_test.go b/pkg/volume/storageos/storageos_test.go index 3cfb9a54715..610bfefe15d 100644 --- a/pkg/volume/storageos/storageos_test.go +++ b/pkg/volume/storageos/storageos_test.go @@ -184,7 +184,7 @@ func TestPlugin(t *testing.T) { "apiUsername": []byte("storageos"), "apiPassword": []byte("storageos"), "apiAddr": []byte("tcp://localhost:5705"), - }}) + }}, metav1.CreateOptions{}) plug.(*storageosPlugin).host = volumetest.NewFakeVolumeHost(t, tmpDir, client, nil) diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index c69c73f2e4d..e414ddec813 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -266,7 +266,7 @@ func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.Co func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { - return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr) + return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{}) } } diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go index f66e609dadd..0558c1dced4 100644 --- a/pkg/volume/util/recyclerclient/recycler_client.go +++ b/pkg/volume/util/recyclerclient/recycler_client.go @@ -178,7 +178,7 @@ type realRecyclerClient struct { } func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + return c.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { diff --git a/pkg/volume/util/resize_util.go b/pkg/volume/util/resize_util.go index 5a988a8d385..3dbffe19022 100644 --- a/pkg/volume/util/resize_util.go +++ b/pkg/volume/util/resize_util.go @@ -77,7 +77,7 @@ func UpdatePVSize( return fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err) } - _, err = kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes) + _, err = kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return fmt.Errorf("error Patching PV %q with error : %v", pvClone.Name, err) } @@ -172,7 +172,7 @@ func 
PatchPVCStatus( } updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace). - Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, "status") + Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if updateErr != nil { return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr) } diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go index 33a29ab28bc..6d5eb0bd8e4 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -90,7 +90,7 @@ func (p *Provision) Admit(ctx context.Context, a admission.Attributes, o admissi Status: corev1.NamespaceStatus{}, } - _, err = p.client.CoreV1().Namespaces().Create(context.TODO(), namespace) + _, err = p.client.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{}) if err != nil && !errors.IsAlreadyExists(err) { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index 820212d2283..9955402b906 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -79,7 +79,7 @@ func newQuotaAccessor() (*quotaAccessor, error) { } func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { - updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(context.TODO(), newQuota) + updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(context.TODO(), newQuota, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/BUILD index 60f1067079d..9cf3259c219 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/BUILD @@ -13,6 +13,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go index 8f7592bbdd5..c0becc5697f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go @@ -28,6 +28,7 @@ import ( informers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" @@ -159,7 +160,7 @@ func (c *KubernetesAPIApprovalPolicyConformantConditionController) sync(key stri crd := inCustomResourceDefinition.DeepCopy() apihelpers.SetCRDCondition(crd, *cond) - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD index d523e974a21..6b1f1a3343a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD @@ -13,6 +13,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go index bc8df0391c3..659bc3286d7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go @@ -22,6 +22,7 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" @@ -136,7 +137,7 @@ func (ec *EstablishingController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, establishedCondition) // Update server with new CRD condition. 
- _, err = ec.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + _, err = ec.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD index 072305e3941..3eff24761bb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD @@ -18,6 +18,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go index dd5ed3210d8..0fa93967b49 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -128,7 +129,7 @@ func (c *CRDFinalizer) sync(key string) error { Reason: "InstanceDeletionInProgress", Message: "CustomResource deletion is in progress", }) - crd, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + crd, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil @@ -151,7 +152,7 @@ func (c *CRDFinalizer) sync(key string) error { cond, deleteErr := c.deleteInstances(crd) apiextensionshelpers.SetCRDCondition(crd, cond) if deleteErr != nil { - if _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd); err != nil { + if _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}); err != nil { utilruntime.HandleError(err) } return deleteErr @@ -166,7 +167,7 @@ func (c *CRDFinalizer) sync(key string) error { } apiextensionshelpers.CRDRemoveFinalizer(crd, apiextensionsv1.CustomResourceCleanupFinalizer) - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go index d41f653a0c1..06531ad755c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go @@ -160,7 +160,7 @@ func (c *ConditionController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, *cond) } - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD index 6fd6099b41f..4de81d6ee00 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD @@ -32,6 +32,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index 9492863b711..d621f847488 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -262,7 +263,7 @@ func (c *NamingConditionController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, namingCondition) apiextensionshelpers.SetCRDCondition(crd, establishedCondition) - updatedObj, err := c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) + updatedObj, err := c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go index 0302a423898..5d7adf4eeee 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go @@ -945,7 +945,7 @@ func TestNameConflict(t *testing.T) { } noxu2Definition := fixtures.NewNoxu2CustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - _, err = 
apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), noxu2Definition) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), noxu2Definition, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -1020,7 +1020,7 @@ func TestStatusGetAndPatch(t *testing.T) { // make sure we don't get 405 Method Not Allowed from Patching CRD/status subresource _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions(). Patch(context.TODO(), noxuDefinition.Name, types.StrategicMergePatchType, - []byte(fmt.Sprintf(`{"labels":{"test-label":"dummy"}}`)), + []byte(fmt.Sprintf(`{"labels":{"test-label":"dummy"}}`)), metav1.PatchOptions{}, "status") if err != nil { t.Fatal(err) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go index fd335cb6dd7..6166f0e1e1b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go @@ -90,7 +90,7 @@ func TestChangeCRD(t *testing.T) { } else { noxuDefinitionToUpdate.Spec.Versions = noxuDefinitionToUpdate.Spec.Versions[0:1] } - if _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), noxuDefinitionToUpdate); err != nil && !apierrors.IsConflict(err) { + if _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), noxuDefinitionToUpdate, metav1.UpdateOptions{}); err != nil && !apierrors.IsConflict(err) { t.Error(err) continue } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go index fcce485eb9b..c783d6af98e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go @@ -953,7 +953,7 @@ func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClient WebhookClientConfig: webhookClientConfig, ConversionReviewVersions: reviewVersions, } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } @@ -970,7 +970,7 @@ func (c *conversionTestContext) removeConversionWebhook(t *testing.T) { Strategy: apiextensionsv1beta1.NoneConverter, } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } @@ -1005,7 +1005,7 @@ func (c *conversionTestContext) setStorageVersion(t *testing.T, version string) for i, v := range crd.Spec.Versions { crd.Spec.Versions[i].Storage = v.Name == version } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } @@ -1036,7 +1036,7 @@ func (c *conversionTestContext) setServed(t *testing.T, 
version string, served b crd.Spec.Versions[i].Served = served } } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go index d5c7883bc1f..d5d6f67bfe3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go @@ -212,7 +212,7 @@ func testDefaulting(t *testing.T, watchCache bool) { t.Fatal(err) } update(obj) - obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), obj) + obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), obj, metav1.UpdateOptions{}) if err != nil && apierrors.IsConflict(err) { continue } else if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go index a48d7f40988..120dc5b9701 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go @@ -326,7 +326,7 @@ func existsInDiscoveryV1(crd *apiextensionsv1.CustomResourceDefinition, apiExten // the created CR. Please call CreateNewCustomResourceDefinition if you need to // watch the CR. func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd) + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -375,7 +375,7 @@ func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceD // the created CR. Please call CreateNewV1CustomResourceDefinition if you need to // watch the CR. 
 func CreateNewV1CustomResourceDefinitionWatchUnsafe(v1CRD *apiextensionsv1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1.CustomResourceDefinition, error) {
-	v1CRD, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), v1CRD)
+	v1CRD, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), v1CRD, metav1.CreateOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go
index 9b5e9196014..16443e4df08 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go
@@ -86,7 +86,7 @@ func UpdateCustomResourceDefinitionWithRetry(client clientset.Interface, name st
 			return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err)
 		}
 		update(crd)
-		crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd)
+		crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{})
 		if err == nil {
 			return crd, nil
 		}
@@ -105,7 +105,7 @@ func UpdateV1CustomResourceDefinitionWithRetry(client clientset.Interface, name
 			return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err)
 		}
 		update(crd)
-		crd, err = client.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd)
+		crd, err = client.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{})
 		if err == nil {
 			return crd, nil
 		}
diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go
index ca250c34586..19a3b535887 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go
@@ -388,7 +388,7 @@ func TestColumnsPatch(t *testing.T) {
 	// error about top-level and per-version columns being mutual exclusive.
 	patch := []byte(`{"spec":{"versions":[{"name":"v1beta1","served":true,"storage":true,"additionalPrinterColumns":[{"name":"Age","type":"date","JSONPath":".metadata.creationTimestamp"}]},{"name":"v1","served":true,"storage":false,"additionalPrinterColumns":[{"name":"Age2","type":"date","JSONPath":".metadata.creationTimestamp"}]}]}}`)
-	_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch)
+	_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -434,7 +434,7 @@ func TestPatchCleanTopLevelColumns(t *testing.T) {
 	// the top-level columns.
patch := []byte(`{"spec":{"additionalPrinterColumns":null,"versions":[{"name":"v1beta1","served":true,"storage":true,"additionalPrinterColumns":[{"name":"Age","type":"date","JSONPath":".metadata.creationTimestamp"}]},{"name":"v1","served":true,"storage":false,"additionalPrinterColumns":[{"name":"Age2","type":"date","JSONPath":".metadata.creationTimestamp"}]}]}}`) - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index 221e9e9cab0..2b16459dd9a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -759,7 +759,7 @@ spec: // create CRDs t.Logf("Creating CRD %s", crd.Name) - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd); err != nil { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil { t.Fatalf("unexpected create error: %v", err) } @@ -789,7 +789,7 @@ spec: t.Fatalf("unexpected get error: %v", err) } crd.Spec.Validation = nil - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd); apierrors.IsConflict(err) { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}); apierrors.IsConflict(err) { continue } if err != nil { @@ -822,7 +822,7 @@ spec: t.Fatalf("unexpected get error: %v", err) } crd.Spec.Validation = &apiextensionsv1beta1.CustomResourceValidation{OpenAPIV3Schema: origSchema} - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd); apierrors.IsConflict(err) { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}); apierrors.IsConflict(err) { continue } if err != nil { @@ -1609,7 +1609,7 @@ properties: crd.Name = fmt.Sprintf("foos.%s", crd.Spec.Group) // create CRDs - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) if len(tst.expectedCreateErrors) > 0 && err == nil { t.Fatalf("expected create errors, got none") } else if len(tst.expectedCreateErrors) == 0 && err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index 469585be28e..dd470662afb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -221,7 +221,7 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api // Changing CRD storage version should be reflected immediately crd.Spec.Versions = versionsV1Beta2Storage - _, err = 
apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go index 9d005d828cc..eff29931d4b 100644 --- a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go +++ b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go @@ -101,7 +101,7 @@ func main() { // Create Deployment fmt.Println("Creating deployment...") - result, err := deploymentsClient.Create(context.TODO(), deployment) + result, err := deploymentsClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) if err != nil { panic(err) } @@ -133,7 +133,7 @@ func main() { result.Spec.Replicas = int32Ptr(1) // reduce replica count result.Spec.Template.Spec.Containers[0].Image = "nginx:1.13" // change nginx version - _, updateErr := deploymentsClient.Update(context.TODO(), result) + _, updateErr := deploymentsClient.Update(context.TODO(), result, metav1.UpdateOptions{}) return updateErr }) if retryErr != nil { diff --git a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go index f523279ec7a..456a4bbcdc9 100644 --- a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go +++ b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go @@ -59,7 +59,7 @@ func TestFakeClient(t *testing.T) { // Inject an event into the fake client. p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}} - _, err := client.CoreV1().Pods("test-ns").Create(context.TODO(), p) + _, err := client.CoreV1().Pods("test-ns").Create(context.TODO(), p, metav1.CreateOptions{}) if err != nil { t.Fatalf("error injecting pod add: %v", err) } diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 4a9c030fb0b..63f805886a3 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -75,7 +75,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { LeaderElectionRecordAnnotationKey: string(recordBytes), }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -89,7 +89,7 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { return err } cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(context.TODO(), cml.cm) + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(context.TODO(), cml.cm, metav1.UpdateOptions{}) return err } diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index 5127a925e76..1c24736ce3c 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -70,7 +70,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { LeaderElectionRecordAnnotationKey: string(recordBytes), }, }, - }) + }, metav1.CreateOptions{}) 
 	return err
 }
@@ -87,7 +87,7 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
 		el.e.Annotations = make(map[string]string)
 	}
 	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(context.TODO(), el.e)
+	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(context.TODO(), el.e, metav1.UpdateOptions{})
 	return err
 }
diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
index 5d46ae38f4c..8695ca9a485 100644
--- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
+++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
@@ -61,7 +61,7 @@ func (ll *LeaseLock) Create(ler LeaderElectionRecord) error {
 			Namespace: ll.LeaseMeta.Namespace,
 		},
 		Spec: LeaderElectionRecordToLeaseSpec(&ler),
-	})
+	}, metav1.CreateOptions{})
 	return err
 }
@@ -72,7 +72,7 @@ func (ll *LeaseLock) Update(ler LeaderElectionRecord) error {
 	}
 	ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
 	var err error
-	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(context.TODO(), ll.lease)
+	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(context.TODO(), ll.lease, metav1.UpdateOptions{})
 	return err
 }
diff --git a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go
index 63862a1361a..f26c0859a0a 100644
--- a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go
+++ b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go
@@ -62,7 +62,7 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter
 		csr.GenerateName = "csr-"
 	}
-	req, err = client.Create(context.TODO(), csr)
+	req, err = client.Create(context.TODO(), csr, metav1.CreateOptions{})
 	switch {
 	case err == nil:
 	case errors.IsAlreadyExists(err) && len(name) > 0:
diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/labels.go b/staging/src/k8s.io/cloud-provider/node/helpers/labels.go
index eea04350e32..94f53edab93 100644
--- a/staging/src/k8s.io/cloud-provider/node/helpers/labels.go
+++ b/staging/src/k8s.io/cloud-provider/node/helpers/labels.go
@@ -95,7 +95,7 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
 		if err != nil {
 			return fmt.Errorf("failed to create a two-way merge patch: %v", err)
 		}
-		if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+		if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
 			return fmt.Errorf("failed to patch the node: %v", err)
 		}
 		return nil
diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/taints.go b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go
index 6fa0a82f602..fa8009dd491 100644
--- a/staging/src/k8s.io/cloud-provider/node/helpers/taints.go
+++ b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go
@@ -107,7 +107,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
 		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
 	}
-	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes)
+	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	return err
 }
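For orientation only, and not part of the patch itself: the hunks in this series mechanically thread an explicit options struct through every mutating client-go call, and Patch now takes metav1.PatchOptions before any subresource names. The sketch below illustrates the post-migration signatures against a fake clientset; the ConfigMap name, namespace, and patch payload are assumptions invented for the example.

// Illustrative sketch of the updated client-go call signatures (assumed example
// object names; uses the fake clientset so it can run without a cluster).
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	ctx := context.TODO()

	// Create now takes metav1.CreateOptions as its final argument.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}
	created, err := client.CoreV1().ConfigMaps("default").Create(ctx, cm, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}

	// Update now takes metav1.UpdateOptions.
	created.Data = map[string]string{"key": "value"}
	if _, err := client.CoreV1().ConfigMaps("default").Update(ctx, created, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}

	// Patch now takes metav1.PatchOptions before the variadic subresource names.
	patch := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
	if _, err := client.CoreV1().ConfigMaps("default").Patch(ctx, "example", types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("options structs accepted")
}

The same three structs (CreateOptions, UpdateOptions, PatchOptions) are what the call-site hunks above and below pass through; callers that need server-side behaviors such as dry-run or field management populate them instead of passing an empty struct.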
diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/BUILD b/staging/src/k8s.io/cloud-provider/service/helpers/BUILD index 793b6d39f9f..fa63eecc3a9 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/BUILD +++ b/staging/src/k8s.io/cloud-provider/service/helpers/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go index 5ec2a49fa5d..0609361762b 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go +++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go @@ -23,6 +23,7 @@ import ( "strings" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -137,7 +138,7 @@ func PatchService(c corev1.CoreV1Interface, oldSvc, newSvc *v1.Service) (*v1.Ser return nil, err } - return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") } diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go index 927013223b1..f29a1ff2e96 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go +++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go @@ -288,7 +288,7 @@ func TestPatchService(t *testing.T) { // Issue a separate update and verify patch doesn't fail after this. svcToUpdate := svcOrigin.DeepCopy() addAnnotations(svcToUpdate) - if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil { + if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update service: %v", err) } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index 2b5c58f8bbc..a72a4f0b2ff 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -241,7 +241,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) { // we don't have an entry and we do want one (2B,2C) case apierrors.IsNotFound(err) && desired != nil: - _, err := c.apiServiceClient.APIServices().Create(context.TODO(), desired) + _, err := c.apiServiceClient.APIServices().Create(context.TODO(), desired, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // created in the meantime, we'll get called again return nil @@ -278,7 +278,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) { // we have an entry and we have a desired, now we deconflict. Only a few fields matter. 
(5B,5C,6B,6C) apiService := curr.DeepCopy() apiService.Spec = desired.Spec - _, err = c.apiServiceClient.APIServices().Update(context.TODO(), apiService) + _, err = c.apiServiceClient.APIServices().Update(context.TODO(), apiService, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 73dfa12dadc..12b056d70d8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -370,7 +370,7 @@ func updateAPIServiceStatus(client apiregistrationclient.APIServicesGetter, orig return newAPIService, nil } - newAPIService, err := client.APIServices().UpdateStatus(context.TODO(), newAPIService) + newAPIService, err := client.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{}) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/BUILD index dfc6fc8d15f..34286c3547e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/BUILD @@ -9,6 +9,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go index ef22223b53c..c9ff127acdf 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go @@ -25,6 +25,7 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" "k8s.io/cli-runtime/pkg/resource" @@ -263,7 +264,7 @@ func (o *AutoscaleOptions) Run() error { return err } - actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(context.TODO(), hpa) + actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(context.TODO(), hpa, metav1.CreateOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go index 043012c3dee..901af7e61b4 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go @@ -201,7 +201,7 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error { // Create ClusterRole. 
 	if !c.DryRun {
-		clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole)
+		clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go
index adbdac48342..e360a7367aa 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go
@@ -165,7 +165,7 @@ func (o *CreateCronJobOptions) Run() error {
 	if !o.DryRun {
 		var err error
-		cronjob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronjob)
+		cronjob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronjob, metav1.CreateOptions{})
 		if err != nil {
 			return fmt.Errorf("failed to create cronjob: %v", err)
 		}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go
index 003cff4744b..83e640e45fc 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go
@@ -193,7 +193,7 @@ func (o *CreateJobOptions) Run() error {
 	}
 	if !o.DryRun {
 		var err error
-		job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job)
+		job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job, metav1.CreateOptions{})
 		if err != nil {
 			return fmt.Errorf("failed to create job: %v", err)
 		}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go
index 8e540b5487d..cd07caf31e4 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go
@@ -342,7 +342,7 @@ func (o *CreateRoleOptions) RunCreateRole() error {
 	// Create role.
 	if !o.DryRun {
-		role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role)
+		role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role, metav1.CreateOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go
index 15f1af0cb69..d955ac9117c 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go
@@ -483,7 +483,7 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev
 		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas))
 		controller.Annotations[sourceIDAnnotation] = sourceID
 		controller.Spec.Replicas = utilpointer.Int32Ptr(0)
-		newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(context.TODO(), controller)
+		newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(context.TODO(), controller, metav1.CreateOptions{})
 		return newRc, false, err
 	}
 	// Validate and use the existing controller.
@@ -576,7 +576,7 @@ func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationC
 		return err
 	}
 	// Then create the same RC with the new name.
- _, err = c.ReplicationControllers(rc.Namespace).Create(context.TODO(), rc) + _, err = c.ReplicationControllers(rc.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) return err } @@ -783,7 +783,7 @@ func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, nam err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(rc) - if rc, e = rcClient.ReplicationControllers(namespace).Update(context.TODO(), rc); e == nil { + if rc, e = rcClient.ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); e == nil { // rc contains the latest controller post update return } @@ -814,7 +814,7 @@ func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, p err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(pod) - if pod, e = podClient.Pods(namespace).Update(context.TODO(), pod); e == nil { + if pod, e = podClient.Pods(namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}); e == nil { return } updateErr := e diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go index f50afb4616e..899240fe255 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go @@ -432,7 +432,7 @@ func (o *RollingUpdateOptions) Run() error { if err != nil { return err } - coreClient.ReplicationControllers(config.NewRc.Namespace).Update(context.TODO(), config.NewRc) + coreClient.ReplicationControllers(config.NewRc.Namespace).Update(context.TODO(), config.NewRc, metav1.UpdateOptions{}) } err = updater.Update(config) if err != nil { diff --git a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go index 7ce6bc13247..f7bb7d4d425 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go @@ -21,6 +21,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -88,9 +89,9 @@ func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, er patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node) if patchErr == nil { - _, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes) + _, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) } else { - _, err = client.Update(context.TODO(), c.node) + _, err = client.Update(context.TODO(), c.node, metav1.UpdateOptions{}) } return err, patchErr } diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go index 45bb569e008..8b2af5704f9 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go @@ -85,7 +85,7 @@ func TestViewHistory(t *testing.T) { ) fakeClientSet := fake.NewSimpleClientset(ssStub) - _, err := fakeClientSet.AppsV1().ControllerRevisions("default").Create(context.TODO(), ssStub1) + _, err := fakeClientSet.AppsV1().ControllerRevisions("default").Create(context.TODO(), ssStub1, metav1.CreateOptions{}) if err != nil { 
t.Fatalf("create controllerRevisions error %v occurred ", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go index 4ea1dc5cee6..bcead995b9c 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go @@ -154,7 +154,7 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m } // Restore revision - if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch); err != nil { + if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch, metav1.PatchOptions{}); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } return rollbackSuccess, nil @@ -294,7 +294,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma } // Restore revision - if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw, metav1.PatchOptions{}); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } @@ -381,7 +381,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations } // Restore revision - if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw, metav1.PatchOptions{}); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go index b5a2c8d19e1..2c3a331f989 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go @@ -154,7 +154,7 @@ func TestGetConfigFromSecret(t *testing.T) { "cloud-config": secretData, } } - _, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret) + _, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) assert.NoError(t, err, test.name) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go index 7bb10208c3f..824381b2b9d 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go @@ -200,7 +200,7 @@ func (ci *ClusterID) getOrInitialize() error { UIDProvider: newID, } - if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(context.TODO(), cfg); err != nil { + if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(context.TODO(), cfg, metav1.CreateOptions{}); err != nil { klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go 
b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go index 1a75bc072d0..a8a7b1a321c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go @@ -157,7 +157,7 @@ func TestEnsureInternalLoadBalancer(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -182,7 +182,7 @@ func TestEnsureInternalLoadBalancerDeprecatedAnnotation(t *testing.T) { } svc := fakeLoadBalancerServiceDeprecatedAnnotation(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create service %s, err %v", svc.Name, err) } @@ -220,7 +220,7 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) // Create the expected resources necessary for an Internal Load Balancer nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} @@ -257,7 +257,7 @@ func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -336,7 +336,7 @@ func TestEnsureInternalLoadBalancerHealthCheckConfigurable(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -368,7 +368,7 @@ func TestUpdateInternalLoadBalancerBackendServices(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -424,7 +424,7 @@ func TestUpdateInternalLoadBalancerNodes(t *testing.T) { node1Name := []string{"test-node-1"} svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = 
gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) nodes, err := createAndInsertNodes(gce, node1Name, vals.ZoneName) require.NoError(t, err) @@ -491,7 +491,7 @@ func TestEnsureInternalLoadBalancerDeleted(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -509,7 +509,7 @@ func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) @@ -535,7 +535,7 @@ func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { svc.Spec.HealthCheckNodePort = healthCheckNodePort svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -554,7 +554,7 @@ func TestClearPreviousInternalResources(t *testing.T) { svc := fakeLoadbalancerService(string(LBTypeInternal)) gce, err := fakeGCECloud(vals) require.NoError(t, err) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) loadBalancerName := gce.GetLoadBalancerName(context.TODO(), "", svc) nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} @@ -619,7 +619,7 @@ func TestEnsureInternalFirewallDeletesLegacyFirewall(t *testing.T) { require.NoError(t, err) vals := DefaultTestClusterValues() svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) fwName := MakeFirewallName(lbName) @@ -695,7 +695,7 @@ func TestEnsureInternalFirewallSucceedsOnXPN(t *testing.T) { require.NoError(t, err) vals := DefaultTestClusterValues() svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) fwName := MakeFirewallName(lbName) @@ -772,7 +772,7 @@ func TestEnsureLoadBalancerDeletedSucceedsOnXPN(t 
*testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -796,7 +796,7 @@ func TestEnsureInternalInstanceGroupsDeleted(t *testing.T) { igName := makeInstanceGroupName(vals.ClusterID) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -916,7 +916,7 @@ func TestEnsureInternalLoadBalancerErrors(t *testing.T) { if tc.injectMock != nil { tc.injectMock(gce.c.(*cloud.MockGCE)) } - _, err = gce.client.CoreV1().Services(params.service.Namespace).Create(context.TODO(), params.service) + _, err = gce.client.CoreV1().Services(params.service.Namespace).Create(context.TODO(), params.service, metav1.CreateOptions{}) require.NoError(t, err) status, err := gce.ensureInternalLoadBalancer( params.clusterName, @@ -1029,7 +1029,7 @@ func TestEnsureInternalLoadBalancerSubsetting(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.EqualError(t, err, cloudprovider.ImplementedElsewhere.Error()) @@ -1064,7 +1064,7 @@ func TestEnsureInternalLoadBalancerDeletedSubsetting(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) @@ -1095,7 +1095,7 @@ func TestEnsureInternalLoadBalancerGlobalAccess(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1160,7 +1160,7 @@ func TestEnsureInternalLoadBalancerDisableGlobalAccess(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = 
gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) svc.Annotations[ServiceAnnotationILBAllowGlobalAccess] = "true" lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1211,7 +1211,7 @@ func TestGlobalAccessChangeScheme(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1372,7 +1372,7 @@ func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1526,7 +1526,7 @@ func TestEnsureInternalLoadBalancerFinalizer(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) require.NoError(t, err) @@ -1564,7 +1564,7 @@ func TestEnsureLoadBalancerSkipped(t *testing.T) { svc := fakeLoadbalancerService(string(LBTypeInternal)) // Add the V2 finalizer svc.Finalizers = append(svc.Finalizers, ILBFinalizerV2) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.EqualError(t, err, cloudprovider.ImplementedElsewhere.Error()) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go index df9862feee6..9f8e1a8e047 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestGetLoadBalancer(t *testing.T) { @@ -83,7 +84,7 @@ func TestEnsureLoadBalancerCreatesInternalLb(t *testing.T) { require.NoError(t, err) apiService := fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) + apiService, err = 
gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService, metav1.CreateOptions{}) require.NoError(t, err) status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes) assert.NoError(t, err) @@ -128,7 +129,7 @@ func TestEnsureLoadBalancerDeletesExistingExternalLb(t *testing.T) { createExternalLoadBalancer(gce, apiService, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) apiService = fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) + apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService, metav1.CreateOptions{}) require.NoError(t, err) status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes) assert.NoError(t, err) @@ -169,7 +170,7 @@ func TestEnsureLoadBalancerDeletedDeletesInternalLb(t *testing.T) { require.NoError(t, err) apiService := fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) + apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService, metav1.CreateOptions{}) require.NoError(t, err) createInternalLoadBalancer(gce, apiService, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go index 1a0937698b4..3b0ba36edc7 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -33,6 +33,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -413,7 +414,7 @@ func patchService(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Servi return nil, err } - return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") } func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) { diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go index 65ff1072ef5..afff6d9a1e6 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go @@ -128,7 +128,7 @@ func TestAddRemoveFinalizer(t *testing.T) { if err != nil { t.Fatalf("Failed to get GCE client, err %v", err) } - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create service %s, err %v", svc.Name, err) } diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index 4b8d29f0209..f0385d34126 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -274,7 +274,7 
@@ func (c *Controller) syncHandler(key string) error { deployment, err := c.deploymentsLister.Deployments(foo.Namespace).Get(deploymentName) // If the resource doesn't exist, we'll create it if errors.IsNotFound(err) { - deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(context.TODO(), newDeployment(foo)) + deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(context.TODO(), newDeployment(foo), metav1.CreateOptions{}) } // If an error occurs during Get/Create, we'll requeue the item so we can @@ -297,7 +297,7 @@ func (c *Controller) syncHandler(key string) error { // should update the Deployment resource. if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas { klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas) - deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(context.TODO(), newDeployment(foo)) + deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(context.TODO(), newDeployment(foo), metav1.UpdateOptions{}) } // If an error occurs during Update, we'll requeue the item so we can @@ -328,7 +328,7 @@ func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1 // we must use Update instead of UpdateStatus to update the Status block of the Foo resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. - _, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).Update(context.TODO(), fooCopy) + _, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).Update(context.TODO(), fooCopy, metav1.UpdateOptions{}) return err } diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index d200bcaeb94..635517d2a6f 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -139,7 +139,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl "tls.key": certCtx.key, }, } - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // kubectl create -f clusterrole.yaml @@ -150,7 +150,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl rbacv1helpers.NewRule("get", "list", "watch").Groups("").Resources("namespaces").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "watch").Groups("admissionregistration.k8s.io").Resources("*").RuleOrDie(), }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader") _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ @@ -170,7 +170,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader") // kubectl create -f authDelegator.yaml @@ -191,7 +191,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":auth-delegator") // 
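// Illustrative sketch, not part of the diff: the sample-controller hunks above
// show Create and Update on AppsV1 Deployments taking metav1.CreateOptions and
// metav1.UpdateOptions. A minimal caller-side version, assuming the caller
// already has the desired *appsv1.Deployment in hand (ensureDeployment is a
// hypothetical helper name).
package samplecontrollersketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureDeployment creates the Deployment if it is missing, otherwise updates
// its spec, using the post-patch call signatures.
func ensureDeployment(client kubernetes.Interface, namespace string, desired *appsv1.Deployment) (*appsv1.Deployment, error) {
	got, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), desired.Name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		// metav1.CreateOptions{} is the new trailing argument.
		return client.AppsV1().Deployments(namespace).Create(context.TODO(), desired, metav1.CreateOptions{})
	}
	if err != nil {
		return nil, err
	}
	got.Spec = desired.Spec
	// metav1.UpdateOptions{} is the new trailing argument.
	return client.AppsV1().Deployments(namespace).Update(context.TODO(), got, metav1.UpdateOptions{})
}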
kubectl create -f deploy.yaml @@ -272,7 +272,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace) @@ -298,12 +298,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace) // kubectl create -f serviceAccount.yaml sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}} - _, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa) + _, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace) // kubectl create -f auth-reader.yaml @@ -326,7 +326,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating role binding %s:sample-apiserver to access configMap", namespace) // Wait for the extension apiserver to be up and healthy @@ -351,7 +351,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl GroupPriorityMinimum: 2000, VersionPriority: 200, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.example.com", namespace) var ( diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index 62f07a524fd..34545e59268 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -64,7 +64,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { }, }, }, - }) + }, metav1.CreateOptions{}) if err == nil { return } diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index ad8a4fdf1f7..01fcd3f999f 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -236,7 +236,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingCRDName) } else { @@ -260,7 +260,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -336,7 +336,7 @@ func deployCustomResourceWebhookAndService(f 
*framework.Framework, image string, }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image) @@ -364,7 +364,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index 20058a0d02e..7409b2f7abb 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -393,7 +393,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu {"op":"test","path":"/spec/versions/1/name","value":"v3"}, {"op": "replace", "path": "/spec/versions/1/name", "value": "v4"} ]`) - crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch) + crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) if err != nil { framework.Failf("%v", err) } @@ -446,7 +446,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("%v", err) } crd.Crd.Spec.Versions[1].Served = false - crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd) + crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd, metav1.UpdateOptions{}) if err != nil { framework.Failf("%v", err) } diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index b0609cec8ce..d9c3923c56e 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -170,7 +170,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec)) } status.Status.Conditions = append(status.Status.Conditions, updateCondition) - updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status) + updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "updating CustomResourceDefinition status") @@ -179,7 +179,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"} patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.GetName(), 
types.JSONPatchType, - []byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), + []byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "patching CustomResourceDefinition status") expectCondition(updated.Status.Conditions, updateCondition) @@ -306,7 +306,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin // Setting default for a to "A" and waiting for the CR to get defaulted on read crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"} - ]`)) + ]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "setting default for a to \"A\" in schema") err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { @@ -346,7 +346,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ {"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"}, {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"} - ]`)) + ]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "setting default for b to \"B\" and remove default for a") err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 0822287858f..c8b5543cd5f 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -321,7 +321,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_pods") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc) + rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -379,7 +379,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_pods") rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc) + rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -445,7 +445,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc) + rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -493,7 +493,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(context.TODO(), deployment) + createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) if err != nil { 
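// Illustrative sketch, not part of the diff: Patch now takes a
// metav1.PatchOptions value before the optional subresource names, as in the
// CustomResourceDefinition status patch and the patchService hunk above.
// patchServiceStatus is a hypothetical helper; the caller supplies the
// strategic-merge-patch bytes.
package patchsketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

// patchServiceStatus applies a strategic merge patch to the "status"
// subresource of a Service with the post-patch Patch signature.
func patchServiceStatus(c v1core.CoreV1Interface, namespace, name string, patchBytes []byte) (*corev1.Service, error) {
	// metav1.PatchOptions{} comes before the variadic subresource argument.
	return c.Services(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
}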
framework.Failf("Failed to create deployment: %v", err) } @@ -552,7 +552,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(context.TODO(), deployment) + createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create deployment: %v", err) } @@ -636,7 +636,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_pods_foreground") rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc) + rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -723,7 +723,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d") rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted) ginkgo.By("create the rc1") - rc1, err := rcClient.Create(context.TODO(), rc1) + rc1, err := rcClient.Create(context.TODO(), rc1, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -731,7 +731,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s") rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay) ginkgo.By("create the rc2") - rc2, err = rcClient.Create(context.TODO(), rc2) + rc2, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -754,7 +754,7 @@ var _ = SIGDescribe("Garbage collector", func() { patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID) for i := 0; i < halfReplicas; i++ { pod := pods.Items[i] - _, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch)) + _, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) } @@ -829,30 +829,30 @@ var _ = SIGDescribe("Garbage collector", func() { podClient := clientSet.CoreV1().Pods(f.Namespace.Name) pod1Name := "pod1" pod1 := newGCPod(pod1Name) - pod1, err := podClient.Create(context.TODO(), pod1) + pod1, err := podClient.Create(context.TODO(), pod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) pod2Name := "pod2" pod2 := newGCPod(pod2Name) - pod2, err = podClient.Create(context.TODO(), pod2) + pod2, err = podClient.Create(context.TODO(), pod2, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) pod3Name := "pod3" pod3 := newGCPod(pod3Name) - pod3, err = podClient.Create(context.TODO(), pod3) + pod3, err = podClient.Create(context.TODO(), pod3, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) // create circular dependency addRefPatch := func(name string, uid types.UID) 
[]byte { return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid)) } patch1 := addRefPatch(pod3.Name, pod3.UID) - pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1) + pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) patch2 := addRefPatch(pod1.Name, pod1.UID) - pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2) + pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) patch3 := addRefPatch(pod2.Name, pod2.UID) - pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3) + pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) // delete one pod, should result in the deletion of all pods @@ -1125,7 +1125,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("Create the cronjob") cronJob := newCronJob("simple", "*/1 * * * ?") - cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob) + cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) ginkgo.By("Wait for the CronJob to create new Job") diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index 8b248be0396..3e1d4b69b76 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ b/test/e2e/apimachinery/generated_clientset.go @@ -126,7 +126,7 @@ var _ = SIGDescribe("Generated clientset", func() { } ginkgo.By("creating the pod") - pod, err = podClient.Create(context.TODO(), pod) + pod, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create pod: %v", err) } @@ -242,7 +242,7 @@ var _ = SIGDescribe("Generated clientset", func() { } ginkgo.By("creating the cronJob") - cronJob, err = cronJobClient.Create(context.TODO(), cronJob) + cronJob, err = cronJobClient.Create(context.TODO(), cronJob, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create cronJob: %v", err) } diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index e99f4461032..180eb873b07 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -110,7 +110,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, }, } - pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod) + pod, err = 
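// Illustrative sketch, not part of the diff: the garbage-collector test hunks
// above patch ownerReferences onto Pods; with this change the Patch call
// carries metav1.PatchOptions{}. addOwnerRef is a hypothetical stand-alone
// version of that pattern.
package gcsketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// addOwnerRef strategic-merge-patches the pod named podName so that it lists
// ownerName/ownerUID as an owner, using the post-patch Patch signature.
func addOwnerRef(client kubernetes.Interface, namespace, podName, ownerName string, ownerUID types.UID) (*corev1.Pod, error) {
	patch := []byte(fmt.Sprintf(
		`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s"}]}}`,
		ownerName, ownerUID))
	return client.CoreV1().Pods(namespace).Patch(context.TODO(), podName, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
}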
f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) ginkgo.By("Waiting for the pod to have running status") @@ -170,7 +170,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }}, }, } - service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service) + service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) ginkgo.By("Deleting the namespace") @@ -271,7 +271,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { }, }) framework.ExpectNoError(err, "failed to marshal JSON patch data") - _, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, []byte(nspatch)) + _, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, []byte(nspatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Namespace") ginkgo.By("get the Namespace and ensuring it has the label") diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index 26857f29cfb..a5d0e4cb964 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -101,7 +101,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a Service") service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP) - service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service) + service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures service creation") @@ -168,7 +168,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a Secret") secret := newTestSecretForQuota("test-secret") - secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) + secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures secret creation") @@ -225,7 +225,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod := newTestPodForQuota(f, podName, requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) podToUpdate := pod @@ -244,7 +244,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("600m") requests[v1.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)") @@ -256,7 +256,7 @@ var _ = 
SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Ensuring a pod cannot update its resource requirements") @@ -266,7 +266,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceMemory] = resource.MustParse("100Mi") requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi") podToUpdate.Spec.Containers[0].Resources.Requests = requests - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate, metav1.UpdateOptions{}) framework.ExpectError(err) ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage") @@ -332,7 +332,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ConfigMap") configMap := newTestConfigMapForQuota("test-configmap") - configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures configMap creation") @@ -382,7 +382,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ReplicationController") replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0) - replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController) + replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replication controller creation") @@ -438,7 +438,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ReplicaSet") replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0) - replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet) + replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replicaset creation") @@ -486,7 +486,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a PersistentVolumeClaim") pvc := newTestPersistentVolumeClaimForQuota("test-claim") - pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") @@ -540,7 +540,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a PersistentVolumeClaim with storage class") pvc := newTestPersistentVolumeClaimForQuota("test-claim") pvc.Spec.StorageClassName = &classGold - pvc, err = 
f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") @@ -690,7 +690,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -729,7 +729,7 @@ var _ = SIGDescribe("ResourceQuota", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -794,7 +794,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage") @@ -824,7 +824,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") @@ -880,7 +880,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Updating a ResourceQuota") resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2") resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi") - resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota) + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi")) @@ -924,7 +924,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring 
resource quota with best effort scope captures the pod usage") @@ -954,7 +954,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") @@ -1006,7 +1006,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -1045,7 +1045,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -1086,7 +1086,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1105,7 +1105,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class") podName := "testpod-pclass1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1125,7 +1125,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: 
metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1144,7 +1144,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating first pod with priority class should pass") podName := "testpod-pclass2-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1155,7 +1155,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating 2nd pod with priority class should fail") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Deleting first pod") @@ -1170,7 +1170,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1189,7 +1189,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class with pclass3") podName := "testpod-pclass3-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") @@ -1200,7 +1200,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a 2nd pod with priority class pclass3") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") @@ -1216,10 +1216,10 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { }) ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), 
&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) - _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}) + _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1238,7 +1238,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass5") podName := "testpod-pclass5" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") @@ -1249,7 +1249,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating 2nd pod with priority class pclass6") podName2 := "testpod-pclass6" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage") @@ -1271,7 +1271,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1290,7 +1290,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass7") podName := "testpod-pclass7" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is not used") @@ -1305,7 +1305,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority 
class (ScopeSelectorOpExists).", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1324,7 +1324,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass8") podName := "testpod-pclass8" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") @@ -1344,7 +1344,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1378,7 +1378,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { limit[v1.ResourceMemory] = resource.MustParse("2Gi") pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1657,7 +1657,7 @@ func newTestSecretForQuota(name string) *v1.Secret { // createResourceQuota in the specified namespace func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { - return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota) + return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota, metav1.CreateOptions{}) } // deleteResourceQuota with the specified name @@ -1723,7 +1723,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s current := resourceQuota.Spec.Hard[resourceName] current.Add(resource.MustParse("1")) resourceQuota.Spec.Hard[resourceName] = current - _, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota) + _, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{}) // ignoring conflicts since someone else may already updated it. 
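Aside, for readers tracking the signature change rather than the individual hunks: a minimal standalone sketch of what the updated ResourceQuota helpers amount to. The package and function names here are invented for illustration and are not part of this patch.

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createQuota creates a ResourceQuota with the post-refactor Create signature,
// which now takes a context and an explicit metav1.CreateOptions value.
func createQuota(ctx context.Context, c kubernetes.Interface, ns string) (*v1.ResourceQuota, error) {
	rq := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "example-quota"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{v1.ResourcePods: resource.MustParse("2")},
		},
	}
	return c.CoreV1().ResourceQuotas(ns).Create(ctx, rq, metav1.CreateOptions{})
}

// bumpQuota mirrors the conflict-tolerant update in the hunk above: Update now
// takes metav1.UpdateOptions, and a conflict is reported back to the caller.
func bumpQuota(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	rq, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pods := rq.Spec.Hard[v1.ResourcePods]
	pods.Add(resource.MustParse("1"))
	rq.Spec.Hard[v1.ResourcePods] = pods
	if _, err := c.CoreV1().ResourceQuotas(ns).Update(ctx, rq, metav1.UpdateOptions{}); apierrors.IsConflict(err) {
		return fmt.Errorf("quota %s was modified concurrently, retry: %w", name, err)
	} else if err != nil {
		return err
	}
	return nil
}

Passing empty options structs, as the hunks in this patch do, preserves the previous behaviour; the only visible difference is the explicit context and options arguments.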
if apierrors.IsConflict(err) { return false, nil diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 0ac564ef60c..8e1bb6ee9eb 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { podName := "pod-1" framework.Logf("Creating pod %s", podName) - _, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(podName)) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(podName), metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns) table := &metav1beta1.Table{} @@ -94,7 +94,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { }, }, }, - }) + }, metav1.CreateOptions{}) if err == nil { return } diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index b51c8d34cd1..569347324ea 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -85,7 +85,7 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification") - testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA) + testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns) expectEvent(watchA, watch.Added, testConfigMapA) expectEvent(watchAB, watch.Added, testConfigMapA) @@ -117,7 +117,7 @@ var _ = SIGDescribe("Watchers", func() { expectNoEvent(watchB, watch.Deleted, nil) ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification") - testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB) + testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns) expectEvent(watchB, watch.Added, testConfigMapB) expectEvent(watchAB, watch.Added, testConfigMapB) @@ -150,7 +150,7 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a new configmap") - testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) + testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap once") @@ -203,7 +203,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") @@ -268,7 +268,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = 
c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") @@ -471,14 +471,14 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa switch op { case createEvent: cm.Name = name(i) - _, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm) + _, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns) existing = append(existing, i) i++ case updateEvent: idx := rand.Intn(len(existing)) cm.Name = name(existing[idx]) - _, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm) + _, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) case deleteEvent: idx := rand.Intn(len(existing)) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 8292ec87e27..8dd47aa6652 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -434,7 +434,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") @@ -451,7 +451,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { h, err := admissionClient.ValidatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting validating webhook configuration") h.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - _, err = admissionClient.ValidatingWebhookConfigurations().Update(context.TODO(), h) + _, err = admissionClient.ValidatingWebhookConfigurations().Update(context.TODO(), h, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Updating validating webhook configuration") @@ -459,7 +459,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err @@ -475,13 +475,13 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Patching a validating webhook configuration's rules to include the create operation") hook, err = 
admissionClient.ValidatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, types.JSONPatchType, - []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`)) + []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "Patching validating webhook configuration") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") @@ -530,13 +530,13 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Getting mutating webhook configuration") ginkgo.By("Updating a mutating webhook configuration's rules to not include the create operation") hook.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - hook, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), hook) + hook, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), hook, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Updating mutating webhook configuration") ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { return false, err } @@ -550,13 +550,13 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Patching a mutating webhook configuration's rules to include the create operation") hook, err = admissionClient.MutatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, types.JSONPatchType, - []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`)) + []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "Patching mutating webhook configuration") ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { return false, err } @@ -608,7 +608,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + _, err 
= client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") @@ -628,7 +628,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err @@ -682,7 +682,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { return false, err } @@ -700,7 +700,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err != nil { return false, err } @@ -737,7 +737,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingName) } else { @@ -761,7 +761,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -837,7 +837,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -865,7 +865,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err, 
"creating service %s in namespace %s", serviceName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") @@ -995,7 +995,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) { ginkgo.By("create a configmap that should be updated by the webhook") client := f.ClientSet configMap := toBeMutatedConfigMap(f) - mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) expectedConfigMapData := map[string]string{ "mutation-start": "yes", @@ -1063,7 +1063,7 @@ func testMutatingPodWebhook(f *framework.Framework) { ginkgo.By("create a pod that should be updated by the webhook") client := f.ClientSet pod := toBeMutatedPod(f) - mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.BeNil()) if len(mutatedPod.Spec.InitContainers) != 1 { framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) @@ -1097,7 +1097,7 @@ func testWebhook(f *framework.Framework) { client := f.ClientSet // Creating the pod, the request should be rejected pod := nonCompliantPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name) expectedErrMsg1 := "the pod contains unwanted container name" if !strings.Contains(err.Error(), expectedErrMsg1) { @@ -1112,7 +1112,7 @@ func testWebhook(f *framework.Framework) { client = f.ClientSet // Creating the pod, the request should be rejected pod = hangingPod(f) - _, err = client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name) // ensure the error is webhook-related, not client-side if !strings.Contains(err.Error(), "webhook") { @@ -1130,7 +1130,7 @@ func testWebhook(f *framework.Framework) { ginkgo.By("create a configmap that should be denied by the webhook") // Creating the configmap, the request should be rejected configmap := nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name) expectedErrMsg := "the configmap contains unwanted key and value" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -1147,7 +1147,7 @@ func testWebhook(f *framework.Framework) { "admit": "this", }, } - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name) ginkgo.By("update (PUT) the admitted configmap to a non-compliant one 
should be rejected by the webhook") @@ -1165,7 +1165,7 @@ func testWebhook(f *framework.Framework) { ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook") patch := nonCompliantConfigMapPatch() - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(context.TODO(), allowedConfigMapName, types.StrategicMergePatchType, []byte(patch)) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(context.TODO(), allowedConfigMapName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch) if !strings.Contains(err.Error(), expectedErrMsg) { framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) @@ -1185,7 +1185,7 @@ func testWebhook(f *framework.Framework) { ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") configmap = nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(context.TODO(), configmap) + _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(context.TODO(), configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName) } @@ -1193,7 +1193,7 @@ func testAttachingPodWebhook(f *framework.Framework) { ginkgo.By("create a pod") client := f.ClientSet pod := toBeAttachedPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) @@ -1297,7 +1297,7 @@ func testFailClosedWebhook(f *framework.Framework) { Name: "foo", }, } - _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(context.TODO(), configmap) + _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(context.TODO(), configmap, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName) if !apierrors.IsInternalError(err) { framework.Failf("expect an internal error, got %#v", err) @@ -1551,7 +1551,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str func createNamespace(f *framework.Framework, ns *v1.Namespace) error { return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - _, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns) + _, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) if err != nil { if strings.HasPrefix(err.Error(), "object is being deleted:") { return false, nil @@ -1660,7 +1660,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig return false, err } update(cm) - if cm, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm); err == nil { + if cm, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}); err == nil { return true, nil } // Only retry update on conflict @@ -1987,7 +1987,7 @@ func 
testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. ] } }` - _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch)) + _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name) ginkgo.By("Patching the custom resource while v2 is storage version") @@ -2116,7 +2116,7 @@ func testCRDDenyWebhook(f *framework.Framework) { } // create CRD - _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd) + _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name) expectedErrMsg := "the crd contains unwanted label" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -2134,7 +2134,7 @@ func labelNamespace(f *framework.Framework, namespace string) { ns.Labels = map[string]string{} } ns.Labels[f.UniqueName] = "true" - _, err = client.CoreV1().Namespaces().Update(context.TODO(), ns) + _, err = client.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) framework.ExpectNoError(err, "error labeling namespace %s", namespace) } @@ -2196,7 +2196,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { ginkgo.By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)") client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name) // http timeout message: context deadline exceeded // dial timeout message: dial tcp {address}: i/o timeout @@ -2210,7 +2210,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { func testSlowWebhookTimeoutNoError(f *framework.Framework) { client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.BeNil()) err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil()) @@ -2271,7 +2271,7 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), config) + return 
f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), config, metav1.CreateOptions{}) } // createMutatingWebhookConfiguration ensures the webhook config scopes object or namespace selection @@ -2286,7 +2286,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), config) + return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), config, metav1.CreateOptions{}) } func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook { @@ -2394,7 +2394,7 @@ func createWebhookConfigurationReadyNamespace(f *framework.Framework) { Name: f.Namespace.Name + "-markers", Labels: map[string]string{f.UniqueName + "-markers": "true"}, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating namespace for webhook configuration ready markers") f.AddNamespacesToDelete(ns) } @@ -2413,7 +2413,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { }, }, } - _, err := cmClient.Create(context.TODO(), marker) + _, err := cmClient.Create(context.TODO(), marker, metav1.CreateOptions{}) if err != nil { // The always-deny webhook does not provide a reason, so check for the error string we expect if strings.Contains(err.Error(), "denied") { diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 4877984fc0e..3786a98b717 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -355,7 +355,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur } func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob) + return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) } func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) { diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 4fcbae9d12b..ed345e0950f 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -76,7 +76,7 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a } // Apply the update, then attempt to push it to the apiserver. 
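For reference, the update-with-retries helpers touched in these test files all follow the same read-mutate-update shape; a hypothetical sketch using client-go's retry.RetryOnConflict (the helper name is invented and is not part of this patch):

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateDaemonSet re-reads the object and retries on conflicts, passing the
// explicit metav1.UpdateOptions value required by the updated client.
func updateDaemonSet(ctx context.Context, c kubernetes.Interface, ns, name string, mutate func(*appsv1.DaemonSet)) (*appsv1.DaemonSet, error) {
	var updated *appsv1.DaemonSet
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mutate(ds)
		updated, err = c.AppsV1().DaemonSets(ns).Update(ctx, ds, metav1.UpdateOptions{})
		return err
	})
	return updated, err
}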
applyUpdate(ds) - if ds, err = daemonsets.Update(context.TODO(), ds); err == nil { + if ds, err = daemonsets.Update(context.TODO(), ds, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating DaemonSet %s", name) return true, nil } @@ -154,7 +154,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -183,7 +183,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating daemon %q with a node selector", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") @@ -212,7 +212,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) framework.ExpectEqual(len(daemonSetLabels), 1) @@ -246,7 +246,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, }, } - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") @@ -280,7 +280,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -294,7 +294,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod := podList.Items[0] pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod) + _, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod, metav1.UpdateOptions{}) framework.ExpectNoError(err, "error failing a daemon pod") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") @@ -312,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating simple daemon set %s", dsName) ds := 
newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -330,7 +330,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods images aren't updated.") @@ -361,7 +361,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -379,7 +379,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. 
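The rolling-update hunks above only append metav1.PatchOptions{} to existing Patch calls; as a sketch, a self-contained version of that image patch could look like the following (function name invented for illustration):

package example

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchDaemonSetImage swaps a container image via a strategic merge patch;
// Patch now takes a context and a trailing metav1.PatchOptions value.
func patchDaemonSetImage(ctx context.Context, c kubernetes.Interface, ns, name, container, image string) (*appsv1.DaemonSet, error) {
	patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":%q,"image":%q}]}}}}`, container, image)
	return c.AppsV1().DaemonSets(ns).Patch(ctx, name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
}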
@@ -420,7 +420,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) + ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Check that daemon pods launch on every node of the cluster") @@ -569,7 +569,7 @@ func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Names ns.Annotations[n] = "" } - return nsClient.Update(context.TODO(), ns) + return nsClient.Update(context.TODO(), ns, metav1.UpdateOptions{}) } func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { @@ -592,7 +592,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s for k, v := range labels { node.Labels[k] = v } - newNode, err = nodeClient.Update(context.TODO(), node) + newNode, err = nodeClient.Update(context.TODO(), node, metav1.UpdateOptions{}) if err == nil { newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels) return true, err diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index a6c5481c443..aae984c3e74 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -234,7 +234,7 @@ func testDeleteDeployment(f *framework.Framework) { framework.Logf("Creating simple deployment %s", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -270,7 +270,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs.Annotations = annotations framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) @@ -280,7 +280,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 3546343826724305833. 
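Worth noting, as an illustration only and not something this patch itself asserts: because the typed Create call previously took no options argument at all, per-request fields could not be supplied this way. With the new signature they can be set inline, a hedged sketch:

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createDeploymentDryRun shows why the explicit options argument is useful in
// tests: fields such as DryRun and FieldManager can now be set per call.
func createDeploymentDryRun(ctx context.Context, c kubernetes.Interface, ns string, d *appsv1.Deployment) (*appsv1.Deployment, error) {
	return c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{
		DryRun:       []string{metav1.DryRunAll},
		FieldManager: "e2e-example",
	})
}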
@@ -309,7 +309,7 @@ func testRecreateDeployment(f *framework.Framework) { deploymentName := "test-recreate-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -347,7 +347,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { rsName := "test-cleanup-controller" replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. @@ -396,7 +396,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { }() d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit - _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d) + _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) @@ -418,7 +418,7 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas) @@ -442,7 +442,7 @@ func testRolloverDeployment(f *framework.Framework) { MaxSurge: intOrStrP(1), } newDeployment.Spec.MinReadySeconds = int32(10) - _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment) + _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the pods were scaled up and down as expected. 
@@ -532,7 +532,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) iterations := 20 @@ -646,7 +646,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { podLabels := map[string]string{"name": WebserverImageName} replicas := int32(1) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) @@ -673,7 +673,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) @@ -708,7 +708,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Waiting for observed generation %d", deployment.Generation) @@ -890,7 +890,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew MaxSurge: intOrStrP(1), MaxUnavailable: intOrStrP(0), } - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index f35ce6a12f5..a8cbbf8daeb 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -241,7 +241,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable MinAvailable: &minAvailable, }, } - _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{}) framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns) waitForPdbToBeProcessed(cs, ns) } @@ -257,7 +257,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail MaxUnavailable: &maxUnavailable, }, } - _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, 
metav1.CreateOptions{}) framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns) waitForPdbToBeProcessed(cs, ns) } @@ -269,7 +269,7 @@ func updatePDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable return err } old.Spec.MinAvailable = &minAvailable - if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), old); err != nil { + if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), old, metav1.UpdateOptions{}); err != nil { return err } return nil @@ -298,7 +298,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { }, } - _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod) + _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns) } } @@ -365,7 +365,7 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu }, } - _, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs) + _, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns) } diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 5440de4e8e9..8672e63a93d 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -101,7 +101,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod { } func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { - pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), podOnNode(podName, nodeName, framework.ServeHostnameImage)) + pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), podOnNode(podName, nodeName, framework.ServeHostnameImage), metav1.CreateOptions{}) if err == nil { framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { @@ -368,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err) c = f.ClientSet ns = f.Namespace.Name @@ -386,7 +386,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{}) framework.ExpectNoError(err) nn, err := e2enode.TotalRegistered(f.ClientSet) @@ -403,7 +403,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2eskipper.SkipUnlessSSHKeyPresent() ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{}) 
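As an aside, a sketch of the PodDisruptionBudget creation these disruption-test hunks update, rewritten as a self-contained helper; the object name and label selector are made up for the example:

package example

import (
	"context"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// createMinAvailablePDB builds a minAvailable PodDisruptionBudget and creates
// it with the new context-and-options signature.
func createMinAvailablePDB(ctx context.Context, c kubernetes.Interface, ns string, minAvailable int) (*policyv1beta1.PodDisruptionBudget, error) {
	min := intstr.FromInt(minAvailable)
	pdb := &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pdb", Namespace: ns},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
			MinAvailable: &min,
		},
	}
	return c.PolicyV1beta1().PodDisruptionBudgets(ns).Create(ctx, pdb, metav1.CreateOptions{})
}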
framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 44d3f8cbcbd..c5b3246ed7b 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -127,7 +127,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC, metav1.CreateOptions{}) framework.ExpectNoError(err) // Check that pods for the new RC were created. @@ -184,7 +184,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { @@ -203,7 +203,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name)) rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc) + rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name)) @@ -282,7 +282,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") @@ -311,7 +311,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") @@ -324,7 +324,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil && apierrors.IsConflict(err) { return false, nil } @@ 
-367,7 +367,7 @@ func updateReplicationControllerWithRetries(c clientset.Interface, namespace, na } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rc) - if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc); err == nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating replication controller %q", name) return true, nil } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 5170e00d79f..de56a574ff0 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -128,7 +128,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s framework.Logf("Creating ReplicaSet %s", name) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS) + _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS, metav1.CreateOptions{}) framework.ExpectNoError(err) // Check that pods for the new RS were created. @@ -185,7 +185,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { @@ -204,7 +204,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name)) rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs) + rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name)) @@ -284,7 +284,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { replicas := int32(1) rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} - rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt) + rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") @@ -316,7 +316,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil && apierrors.IsConflict(err) { return false, nil } diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index f10bfdbde8b..4fb21042804 100644 --- a/test/e2e/apps/statefulset.go +++ 
b/test/e2e/apps/statefulset.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err) }) @@ -123,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() { *(ss.Spec.Replicas) = 3 e2esset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + ss.Name) @@ -165,7 +165,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. kind := ss.Kind - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ss.Kind = kind @@ -247,7 +247,7 @@ var _ = SIGDescribe("StatefulSet", func() { *(ss.Spec.Replicas) = 2 e2esset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunning(c, 1, 0, ss) @@ -314,7 +314,7 @@ var _ = SIGDescribe("StatefulSet", func() { }()} }(), } - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -499,7 +499,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.OnDeleteStatefulSetStrategyType, } - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -581,7 +581,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) setHTTPProbe(ss) - ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -661,7 +661,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -720,7 +720,7 @@ var _ = SIGDescribe("StatefulSet", func() { NodeName: node.Name, }, } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), 
pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) @@ -728,7 +728,7 @@ var _ = SIGDescribe("StatefulSet", func() { statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) ss.Spec.Template.Spec.NodeName = node.Name - _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss) + _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) @@ -793,7 +793,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -809,7 +809,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = 2 - scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale) + scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{}) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } @@ -1086,7 +1086,7 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k // PVCs and one using no storage. func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -1292,7 +1292,7 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string, } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(statefulSet) - if statefulSet, err = statefulSets.Update(context.TODO(), statefulSet); err == nil { + if statefulSet, err = statefulSets.Update(context.TODO(), statefulSet, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating stateful set %s", name) return true, nil } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 796c8b906f1..00dd5289ffe 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -96,7 +96,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { _, err = f.PodClient().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods") - _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch) + _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch pod") f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) @@ -206,7 +206,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { podLabels := map[string]string{"name": "audit-deployment-pod"} d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType) - _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d) + _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create audit-deployment") _, err = f.ClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) @@ -216,10 +216,10 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { framework.ExpectNoError(err, "failed to create watch for deployments") deploymentChan.Stop() - _, err = f.ClientSet.AppsV1().Deployments(namespace).Update(context.TODO(), d) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Update(context.TODO(), d, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update audit-deployment") - _, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(context.TODO(), d.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(context.TODO(), d.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch deployment") _, err = f.ClientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -339,7 +339,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create audit-configmap") _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{}) @@ -349,10 +349,10 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { framework.ExpectNoError(err, "failed to create watch for config maps") configMapChan.Stop() - _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update audit-configmap") - _, err = 
f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch configmap") _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -471,7 +471,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { "top-secret": []byte("foo-bar"), }, } - _, err := f.ClientSet.CoreV1().Secrets(namespace).Create(context.TODO(), secret) + _, err := f.ClientSet.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create audit-secret") _, err = f.ClientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}) @@ -481,10 +481,10 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { framework.ExpectNoError(err, "failed to create watch for secrets") secretChan.Stop() - _, err = f.ClientSet.CoreV1().Secrets(namespace).Update(context.TODO(), secret) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update audit-secret") - _, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(context.TODO(), secret.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(context.TODO(), secret.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch secret") _, err = f.ClientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}) diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 1a069064079..5882e2cd3d1 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -63,7 +63,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { ObjectMeta: metav1.ObjectMeta{ Name: "audit", }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create namespace") _, err = f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{ @@ -87,7 +87,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create proxy pod") _, err = f.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), &v1.Service{ @@ -105,7 +105,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { "app": "audit", }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create proxy service") var podIP string @@ -151,7 +151,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }, } - _, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), &sink) + _, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), &sink, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create audit sink") framework.Logf("created audit sink") @@ -208,7 +208,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { _, err = f.PodClient().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods") - _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch) + _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch pod") f.PodClient().DeleteSync(pod.Name, 
&metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index 94c03d75eee..561b9583dae 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -68,7 +68,7 @@ var _ = SIGDescribe("Certificates API", func() { csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests() framework.Logf("creating CSR") - csr, err = csrs.Create(context.TODO(), csr) + csr, err = csrs.Create(context.TODO(), csr, metav1.CreateOptions{}) framework.ExpectNoError(err) csrName := csr.Name diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index 47b88153bc9..ebbb3710cbb 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -75,7 +75,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { }, AutomountServiceAccountToken: &trueValue, } - _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA) + _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name) pod := createNodeAuthTestPod(f) diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index a20e4eb2940..0710bb04d67 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -97,7 +97,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": "content", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap) + _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap) _, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmap.Name, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) @@ -114,7 +114,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": []byte("keep it secret"), }, } - _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret) + _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name) ginkgo.By("Node should not get the secret") @@ -147,7 +147,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } - _, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name) ginkgo.By("The node should able to access the secret") @@ -173,7 +173,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - _, err := c.CoreV1().Nodes().Create(context.TODO(), node) + _, err := c.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 0033b0ac8e5..d81f8663c71 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -79,7 +79,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { ginkgo.It("should forbid pod creation when no PSP is available", func() { ginkgo.By("Running a restricted pod") - _, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("restricted")) + _, err := 
c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("restricted"), metav1.CreateOptions{}) expectForbidden(err) }) @@ -89,12 +89,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() ginkgo.By("Running a restricted pod") - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("allowed")) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("allowed"), metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) testPrivilegedPods(func(pod *v1.Pod) { - _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) expectForbidden(err) }) }) @@ -108,7 +108,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() testPrivilegedPods(func(pod *v1.Pod) { - p, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + p, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace)) @@ -215,7 +215,7 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu ns := f.Namespace.Name name := fmt.Sprintf("%s-%s", ns, psp.Name) psp.Name = name - psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp) + psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create PSP") // Create the Role to bind it to the namespace. @@ -229,7 +229,7 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu ResourceNames: []string{name}, Verbs: []string{"use"}, }}, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create PSP role") // Bind the role to the namespace. 
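
The hunks above all make the same mechanical change: typed client write calls gain an explicit options argument alongside the context. A minimal sketch of the new Create convention, assuming placeholder names (c, ns, pod) that are not taken from this patch:

    package example

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createPod illustrates the post-change signature; an empty
    // metav1.CreateOptions{} preserves the previous default behavior.
    func createPod(c kubernetes.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
        // Before this change the call was Create(context.TODO(), pod).
        return c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
    }

The same empty-struct pattern recurs in the hunks below for UpdateOptions and PatchOptions.
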
diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 4c4170be1f6..34659ce2106 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -125,7 +125,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) framework.ExpectNoError(err) sa.Secrets = nil - _, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(context.TODO(), sa) + _, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(context.TODO(), sa, metav1.UpdateOptions{}) framework.ExpectNoError(updateErr) } @@ -172,7 +172,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ConformanceIt("should mount an API token into pods ", func() { var rootCAContent string - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) framework.ExpectNoError(err) // Standard get, update retry loop @@ -222,7 +222,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { TerminationGracePeriodSeconds: &zero, RestartPolicy: v1.RestartPolicyNever, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) @@ -239,7 +239,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectEqual(mountedNamespace, f.Namespace.Name) // Token should be a valid credential that identifies the pod's service account tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}} - tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview) + tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(tokenReview.Status.Authenticated, true) framework.ExpectEqual(tokenReview.Status.Error, "") @@ -282,9 +282,9 @@ var _ = SIGDescribe("ServiceAccounts", func() { falseValue := false mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue} nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue} - mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA) + mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA, metav1.CreateOptions{}) framework.ExpectNoError(err) - nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA) + nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA, metav1.CreateOptions{}) framework.ExpectNoError(err) // Standard get, update retry loop @@ -394,7 +394,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { AutomountServiceAccountToken: tc.AutomountPodSpec, }, } - createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("created pod %s", tc.PodName) @@ 
-426,7 +426,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Data: map[string]string{ "ca.crt": string(cfg.TLSClientConfig.CAData), }, - }); err != nil && !apierrors.IsAlreadyExists(err) { + }, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("Unexpected err creating kube-ca-crt: %v", err) } @@ -489,7 +489,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { }}, }, } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("created pod") diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 2e6428a0a63..dcea5aac552 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -457,7 +457,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e return err } - _, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes) + _, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return err } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 2f0294a2470..7f76d374d46 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -1036,7 +1036,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str MinAvailable: &minAvailable, }, } - _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb) + _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) defer func() { f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, &metav1.DeleteOptions{}) @@ -1474,7 +1474,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { Value: "DisabledForTest", Effect: v1.TaintEffectNoSchedule, }) - _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode) + _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode, metav1.UpdateOptions{}) if err == nil { return nil } @@ -1515,7 +1515,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd return nil } freshNode.Spec.Taints = newTaints - _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode) + _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode, metav1.UpdateOptions{}) if err == nil { return nil } @@ -1690,7 +1690,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa // (we retry 409 errors in case rc reference got out of sync) for j := 0; j < 3; j++ { *rc.Spec.Replicas = int32((i + 1) * podsPerNode) - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}) if err == nil { break } @@ -1918,7 +1918,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { MinAvailable: &minAvailable, }, } - _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb) + _, err := 
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb, metav1.CreateOptions{}) newPdbs = append(newPdbs, pdbName) if err != nil { @@ -1934,7 +1934,7 @@ func createPriorityClasses(f *framework.Framework) func() { highPriorityClassName: 1000, } for className, priority := range priorityClasses { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}, metav1.CreateOptions{}) if err != nil { klog.Errorf("Error creating priority class: %v", err) } diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index b5f72849fd7..0d7099e5609 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -276,7 +276,7 @@ func (tc *CustomMetricTestCase) Run() { waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) // Autoscale the deployment - _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa) + _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create HPA: %v", err) } @@ -287,13 +287,13 @@ func (tc *CustomMetricTestCase) Run() { func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error { if deployment != nil { - _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment) + _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment, metav1.CreateOptions{}) if err != nil { return err } } if pod != nil { - _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod) + _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return err } diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 7d4d51a41ba..14dff8eaafc 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -300,7 +300,7 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap { } func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { - _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap) + _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index a190a4b3ca2..ee722bff517 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -39,7 +39,7 @@ func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { return err } *(rc.Spec.Replicas) = replicas - _, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc) + _, err = 
c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}) return err } diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index 378f7b58f75..db1d3ca391b 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -44,7 +44,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { configMap := newEnvFromConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -141,14 +141,14 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ConfigMap") configMap.Data = map[string]string{ "data": "value", } ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name)) - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update ConfigMap") configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) @@ -171,7 +171,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { Data: map[string]string{ "valueName": "value", }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ConfigMap") configMapPatchPayload, err := json.Marshal(v1.ConfigMap{ @@ -186,7 +186,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { }) framework.ExpectNoError(err, "failed to marshal patch data") - _, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload)) + _, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch ConfigMap") configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{}) @@ -246,5 +246,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) { } 
ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name)) - return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) } diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index f2da51145e9..eca4cf42ff0 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -139,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -189,7 +189,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") @@ -226,7 +226,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -344,12 +344,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -460,11 +460,11 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), 
updateConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -492,7 +492,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -558,22 +558,22 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { name := "immutable" configMap := newConfigMap(f, name) - currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Data["data-4"] = "value-4" - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Mark config map as immutable. trueVal := true currentConfigMap.Immutable = &trueVal - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace) // Ensure data can't be changed now. currentConfigMap.Data["data-5"] = "value-5" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure config map can't be switched from immutable to mutable. @@ -583,14 +583,14 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { falseVal := false currentConfigMap.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure that metadata can be changed. 
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Ensure that immutable config map can be deleted. @@ -645,7 +645,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -721,7 +721,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -856,7 +856,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } // creating a pod with configMap object, but with different key which is not present in configMap object. 
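
As the ConfigMap volume tests above show, Update and Patch now take an options struct as well. A short sketch under the same assumptions (placeholder clientset c, namespace ns, and ConfigMap name, none of which come from this patch):

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    // touchConfigMap modifies a ConfigMap twice: once through Update and once
    // through a strategic-merge Patch, passing empty option structs both times.
    func touchConfigMap(c kubernetes.Interface, ns, name string) error {
        cm, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if cm.Data == nil {
            cm.Data = map[string]string{}
        }
        cm.Data["data-1"] = "value-2"
        // Previously Update(context.TODO(), cm); the empty UpdateOptions{} keeps the old behavior.
        if _, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil {
            return err
        }
        // Patch gains a metav1.PatchOptions{} argument in the same way.
        patch := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
        _, err = c.CoreV1().ConfigMaps(ns).Patch(context.TODO(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
        return err
    }
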
diff --git a/test/e2e/common/lease.go b/test/e2e/common/lease.go index 251dfaf9caa..7bb1b64c71c 100644 --- a/test/e2e/common/lease.go +++ b/test/e2e/common/lease.go @@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Lease", func() { }, } - createdLease, err := leaseClient.Create(context.TODO(), lease) + createdLease, err := leaseClient.Create(context.TODO(), lease, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating Lease failed") readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(1), } - _, err = leaseClient.Update(context.TODO(), createdLease) + _, err = leaseClient.Update(context.TODO(), createdLease, metav1.UpdateOptions{}) framework.ExpectNoError(err, "updating Lease failed") readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) @@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Lease", func() { patchBytes, err := getPatchBytes(readLease, patchedLease) framework.ExpectNoError(err, "creating patch failed") - _, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes) + _, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err, "patching Lease failed") readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) @@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(0), }, } - _, err = leaseClient.Create(context.TODO(), lease2) + _, err = leaseClient.Create(context.TODO(), lease2, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating Lease failed") leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{}) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index d4662244cc0..d0b7e9d80d3 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -505,7 +505,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service") // Make a client pod that verifies that it has the service environment variables. @@ -829,7 +829,7 @@ var _ = framework.KubeDescribe("Pods", func() { framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false initially.") ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1)) - _, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status") + _, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) // Sleep for 10 seconds. 
time.Sleep(syncLoopFrequency) @@ -837,12 +837,12 @@ var _ = framework.KubeDescribe("Pods", func() { framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True") ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2)) - _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status") + _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) validatePodReadiness(true) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1)) - _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status") + _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) validatePodReadiness(false) diff --git a/test/e2e/common/podtemplates.go b/test/e2e/common/podtemplates.go index 5c85a27a93b..f289797466c 100644 --- a/test/e2e/common/podtemplates.go +++ b/test/e2e/common/podtemplates.go @@ -58,7 +58,7 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { }, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create PodTemplate") // get template @@ -75,7 +75,7 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { }, }) framework.ExpectNoError(err, "failed to marshal patch data") - _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch)) + _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch PodTemplate") // get template (ensure label is there) diff --git a/test/e2e/common/projected_combined.go b/test/e2e/common/projected_combined.go index b623cee07b5..8cf96eada75 100644 --- a/test/e2e/common/projected_combined.go +++ b/test/e2e/common/projected_combined.go @@ -63,11 +63,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() { } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index 6b561123b7a..bbf92234f81 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ 
-138,7 +138,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -194,7 +194,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") @@ -253,12 +253,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -387,11 +387,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -419,7 +419,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = 
f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -525,7 +525,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -606,7 +606,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index 64bd5aa90b6..7f95e9bb24c 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -102,7 +102,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -128,7 +128,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -255,12 +255,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", 
updateSecret.Name, err) } @@ -389,11 +389,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -435,7 +435,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -513,7 +513,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index 5da4b1e67ce..2c161a355c6 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -23,6 +23,7 @@ import ( "time" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/test/e2e/framework" @@ -300,7 +301,7 @@ while true; do sleep 1; done } secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) ginkgo.By("create image pull secret") - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil) container.ImagePullSecrets = []string{secret.Name} diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index cf8e14ddbe2..f7b8c443d04 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { func createRuntimeClass(f *framework.Framework, name, handler string) string { uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name) rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler) - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), rc) + rc, err := 
f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") return rc.GetName() } @@ -123,7 +123,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) { pod = f.PodClient().Create(pod) expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", *pod.Spec.RuntimeClassName)) } else { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err, "should be forbidden") framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") } diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index 6953a3efb84..c7375160803 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -46,7 +46,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -94,7 +94,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { secret := newEnvFromSecret(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -165,7 +165,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { "key": []byte("value"), }, Type: "Opaque", - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create secret") ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero") @@ -197,7 +197,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { "data": map[string][]byte{"key": []byte(secretPatchNewData)}, }) framework.ExpectNoError(err, "failed to marshal JSON") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch)) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch secret") secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{}) @@ -258,5 +258,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) { }, } ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) - return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) + return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) } diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index 207802657a7..0f718378a7b 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -108,7 
+108,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -134,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -245,12 +245,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -355,11 +355,11 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -376,22 +376,22 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { name := "immutable" secret := secretForTest(f.Namespace.Name, name) - currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) + currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Data["data-4"] = []byte("value-4\n") - 
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Mark secret as immutable. trueVal := true currentSecret.Immutable = &trueVal - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace) // Ensure data can't be changed now. currentSecret.Data["data-5"] = []byte("value-5\n") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure secret can't be switched from immutable to mutable. @@ -401,14 +401,14 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { falseVal := false currentSecret.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure that metadata can be changed. currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Ensure that immutable secret can be deleted. 
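A minimal sketch of the immutable-Secret flow exercised above, assuming a clientset.Interface c, a namespace ns, and an already-created Secret s with non-nil Data (the helper name and error message are illustrative, not part of this patch); it shows the explicit metav1.UpdateOptions{} that every Update call now takes:

    package example

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // markImmutableAndVerify is a hypothetical helper: it marks an existing Secret
    // immutable, then confirms that a later data mutation is rejected as invalid,
    // mirroring the test flow above.
    func markImmutableAndVerify(c clientset.Interface, ns string, s *v1.Secret) error {
        trueVal := true
        s.Immutable = &trueVal
        s, err := c.CoreV1().Secrets(ns).Update(context.TODO(), s, metav1.UpdateOptions{})
        if err != nil {
            return err
        }
        // Data changes on an immutable Secret should now fail validation.
        s.Data["data-5"] = []byte("value-5\n")
        if _, err := c.CoreV1().Secrets(ns).Update(context.TODO(), s, metav1.UpdateOptions{}); !apierrors.IsInvalid(err) {
            return fmt.Errorf("expected Invalid error for data change on immutable Secret, got: %v", err)
        }
        return nil
    }
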
@@ -461,7 +461,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -530,7 +530,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -648,7 +648,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } // creating a pod with secret object, with the key which is not present in secret object. diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index 501b26e6e07..3c26f7bc07d 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func ginkgo.By("Creating a pod with one valid and two invalid sysctls") client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := client.Create(context.TODO(), pod) + _, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.BeNil()) gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`)) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 49883a59944..5357f7f17eb 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -141,7 +141,7 @@ func svcByName(name string, port int) *v1.Service { // NewSVCByName creates a service by name. func NewSVCByName(c clientset.Interface, ns, name string) error { const testPort = 9376 - _, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort)) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort), metav1.CreateOptions{}) return err } @@ -154,7 +154,7 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe } return c.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rcByNamePort( - name, replicas, framework.ServeHostnameImage, containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod)) + name, replicas, framework.ServeHostnameImage, containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod), metav1.CreateOptions{}) } // RestartNodes restarts specific nodes. 
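Illustrative sketch of the call pattern these e2e changes converge on, assuming a clientset.Interface c and a namespace ns (the helper name is hypothetical, not part of this patch): every typed client call now passes a context plus an explicit options struct such as metav1.CreateOptions{} or metav1.UpdateOptions{}:

    package example

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // createThenRelabelSecret creates a Secret and then updates its labels, using the
    // explicit options arguments required by the updated client-go signatures.
    func createThenRelabelSecret(c clientset.Interface, ns string, s *v1.Secret) (*v1.Secret, error) {
        created, err := c.CoreV1().Secrets(ns).Create(context.TODO(), s, metav1.CreateOptions{})
        if err != nil {
            return nil, err
        }
        created.Labels = map[string]string{"label1": "value1"}
        return c.CoreV1().Secrets(ns).Update(context.TODO(), created, metav1.UpdateOptions{})
    }
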
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index db73e093d1e..f9b073ed3ec 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -122,7 +122,7 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { return } p.Namespace = ns - if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p); err != nil { + if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{}); err != nil { framework.Logf("Failed to create %v: %v", p.Name, err) return } diff --git a/test/e2e/framework/auth/helpers.go b/test/e2e/framework/auth/helpers.go index ae4a7edd07b..4b984263eb8 100644 --- a/test/e2e/framework/auth/helpers.go +++ b/test/e2e/framework/auth/helpers.go @@ -66,7 +66,7 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette } err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) { - response, err := c.SubjectAccessReviews().Create(context.TODO(), review) + response, err := c.SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{}) if err != nil { return false, err } @@ -96,7 +96,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv Name: clusterRole, }, Subjects: subjects, - }) + }, metav1.CreateOptions{}) if err != nil { return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects) @@ -133,7 +133,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb Name: role, }, Subjects: subjects, - }) + }, metav1.CreateOptions{}) if err != nil { return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects) diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index eaa4f02bd39..c9a7e0f7360 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -440,7 +440,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": name, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) rcConfig := testutils.RCConfig{ @@ -494,7 +494,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": controllerName, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) dnsClusterFirst := v1.DNSClusterFirst @@ -534,7 +534,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma TargetCPUUtilizationPercentage: &cpu, }, } - hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa) + hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{}) framework.ExpectNoError(errHPA) return hpa } diff --git a/test/e2e/framework/deployment/fixtures.go b/test/e2e/framework/deployment/fixtures.go index 24e4b20ea73..7792c6dd16a 100644 --- a/test/e2e/framework/deployment/fixtures.go +++ b/test/e2e/framework/deployment/fixtures.go @@ -72,7 +72,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s // CreateDeployment creates a deployment. 
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) { deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command) - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 4bbbc3c9126..aafa4def823 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -612,7 +612,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str Ports: portsFunc(), Selector: serviceSelector, }, - }) + }, metav1.CreateOptions{}) ExpectNoError(err) return service } @@ -632,7 +632,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n Labels: podLabels, }, Spec: podSpec(node), - }) + }, metav1.CreateOptions{}) ExpectNoError(err) } return podLabels diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index fb9f41bd59d..ed7dd5c719f 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -408,10 +408,10 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin // TODO: Retry the update. We don't really expect anything to conflict though. framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host) s.Data = secret.Data - _, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), s) + _, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), s, metav1.UpdateOptions{}) } else { framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host) - _, err = kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret) + _, err = kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) } return host, cert, key, err } @@ -467,7 +467,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations = svcAnnotations - _, err = j.Client.CoreV1().Services(ns).Update(context.TODO(), &svc) + _, err = j.Client.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } } @@ -537,7 +537,7 @@ func ingressToManifest(ing *networkingv1beta1.Ingress, path string) error { // runCreate runs the required command to create the given ingress. func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(context.TODO(), ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(context.TODO(), ing, metav1.CreateOptions{}) } // Use kubemci to create a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -551,7 +551,7 @@ func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1. // runUpdate runs the required command to update the given ingress. 
func (j *TestJig) runUpdate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{}) } // Use kubemci to update a multicluster ingress. // kubemci does not have an update command. We use "create --force" to update an existing ingress. @@ -1120,11 +1120,11 @@ func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment { // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) { - deployCreated, err := cs.AppsV1().Deployments(namespace).Create(context.TODO(), generateBacksideHTTPSDeploymentSpec()) + deployCreated, err := cs.AppsV1().Deployments(namespace).Create(context.TODO(), generateBacksideHTTPSDeploymentSpec(), metav1.CreateOptions{}) if err != nil { return nil, nil, nil, err } - svcCreated, err := cs.CoreV1().Services(namespace).Create(context.TODO(), generateBacksideHTTPSServiceSpec()) + svcCreated, err := cs.CoreV1().Services(namespace).Create(context.TODO(), generateBacksideHTTPSServiceSpec(), metav1.CreateOptions{}) if err != nil { return nil, nil, nil, err } diff --git a/test/e2e/framework/job/rest.go b/test/e2e/framework/job/rest.go index 80995ae1afd..e642fd50da0 100644 --- a/test/e2e/framework/job/rest.go +++ b/test/e2e/framework/job/rest.go @@ -40,5 +40,5 @@ func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) // CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has // been created. 
func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Create(context.TODO(), job) + return c.BatchV1().Jobs(ns).Create(context.TODO(), job, metav1.CreateOptions{}) } diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index d849a12c2e9..cea7e30ed0c 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -585,7 +585,7 @@ func (config *NetworkingTestConfig) createTestPods() { } func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service { - _, err := config.getServiceClient().Create(context.TODO(), serviceSpec) + _, err := config.getServiceClient().Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) err = framework.WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go index 6765b8dc6c0..af7122ed3b0 100644 --- a/test/e2e/framework/pod/create.go +++ b/test/e2e/framework/pod/create.go @@ -36,7 +36,7 @@ var ( // CreateUnschedulablePod with given claims based on node selector func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -61,7 +61,7 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC // CreatePod with given claims based on node selector func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -91,7 +91,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pod.Spec.NodeSelector = node.Selector pod.Spec.Affinity = node.Affinity - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index a58b71af02e..4d8e484781f 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -436,7 +436,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw if tweak != nil { tweak(pod) } - execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod) + execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) expectNoError(err, "failed to create new exec pod in namespace: %s", ns) err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := 
client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{}) diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index 56510b72b2d..f3b4c2e37c2 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -79,7 +79,7 @@ type PodClient struct { // Create creates a new pod according to the framework specifications (don't wait for it to start). func (c *PodClient) Create(pod *v1.Pod) *v1.Pod { c.mungeSpec(pod) - p, err := c.PodInterface.Create(context.TODO(), pod) + p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) ExpectNoError(err, "Error creating Pod") return p } @@ -121,7 +121,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { return false, fmt.Errorf("failed to get pod %q: %v", name, err) } updateFn(pod) - _, err = c.PodInterface.Update(context.TODO(), pod) + _, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{}) if err == nil { Logf("Successfully updated pod %q", name) return true, nil diff --git a/test/e2e/framework/psp.go b/test/e2e/framework/psp.go index b14874a20bf..c7208d16da4 100644 --- a/test/e2e/framework/psp.go +++ b/test/e2e/framework/psp.go @@ -118,7 +118,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string } psp := privilegedPSP(podSecurityPolicyPrivileged) - _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp) + _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) } @@ -133,7 +133,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string ResourceNames: []string{podSecurityPolicyPrivileged}, Verbs: []string{"use"}, }}, - }) + }, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { ExpectNoError(err, "Failed to create PSP role") } diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index d946b9a7ba2..7fa5f4a24e3 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -293,7 +293,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, // create the PV resource. Fails test on error. func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { - pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("PV Create API error: %v", err) } @@ -307,7 +307,7 @@ func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol // CreatePVC creates the PVC resource. Fails test on error. 
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("PVC Create API error: %v", err) } diff --git a/test/e2e/framework/security/apparmor.go b/test/e2e/framework/security/apparmor.go index 806f0dffebb..5d0a175a924 100644 --- a/test/e2e/framework/security/apparmor.go +++ b/test/e2e/framework/security/apparmor.go @@ -156,7 +156,7 @@ profile %s flags=(attach_disconnected) { profileName: profile, }, } - _, err := clientset.CoreV1().ConfigMaps(nsName).Create(context.TODO(), cm) + _, err := clientset.CoreV1().ConfigMaps(nsName).Create(context.TODO(), cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap") } @@ -224,7 +224,7 @@ func createAppArmorProfileLoader(nsName string, clientset clientset.Interface) { }, }, } - _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(context.TODO(), loader) + _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(context.TODO(), loader, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController") // Wait for loader to be ready. diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index 0ea48ac8dc0..71b6ecb3ef6 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -106,7 +106,7 @@ func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) } @@ -121,7 +121,7 @@ func (j *TestJig) CreateTCPService(tweak func(svc *v1.Service)) (*v1.Service, er if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) } @@ -136,7 +136,7 @@ func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, er if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err) } @@ -161,7 +161,7 @@ func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Se if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err) } @@ -253,7 +253,7 @@ func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(sv if tweak != nil { tweak(svc) } - _, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) + _, err := 
j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err) } @@ -443,7 +443,7 @@ func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err) } update(service) - result, err := j.Client.CoreV1().Services(j.Namespace).Update(context.TODO(), service) + result, err := j.Client.CoreV1().Services(j.Namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) if err == nil { return j.sanityCheckService(result, service.Spec.Type) } @@ -619,7 +619,7 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { // CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready func (j *TestJig) CreatePDB(rc *v1.ReplicationController) (*policyv1beta1.PodDisruptionBudget, error) { pdb := j.newPDBTemplate(rc) - newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(context.TODO(), pdb) + newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err) } @@ -659,7 +659,7 @@ func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.Replication if tweak != nil { tweak(rc) } - result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(context.TODO(), rc) + result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err) } @@ -683,7 +683,7 @@ func (j *TestJig) Scale(replicas int) error { scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(replicas) - _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(context.TODO(), rc, scale) + _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(context.TODO(), rc, scale, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to scale RC %q: %v", rc, err) } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index ada215aa739..91fd3d4c99c 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -72,7 +72,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update update(service) - service, err = c.CoreV1().Services(namespace).Update(context.TODO(), service) + service, err = c.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return service, err diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index ca1056c10d5..d93fa565967 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -50,11 +50,11 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating " + ss.Name + " service")) - _, err = c.CoreV1().Services(ns).Create(context.TODO(), svc) + _, err = c.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", 
ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)) - _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) + _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) return ss @@ -253,7 +253,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State framework.Failf("failed to get statefulset %q: %v", name, err) } update(ss) - ss, err = c.AppsV1().StatefulSets(ns).Update(context.TODO(), ss) + ss, err = c.AppsV1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{}) if err == nil { return ss } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 18900d3edb1..f32227c7bc5 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -383,7 +383,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s var got *v1.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error - got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj) + got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{}) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 7f6f0273dbb..30d0000f75d 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -199,7 +199,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestCo }, }, } - _, err := cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints) + _, err := cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create endpoints for Gluster server") return config, pod, ip @@ -303,7 +303,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } var pod *v1.Pod - serverPod, err := podClient.Create(context.TODO(), serverPod) + serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{}) // ok if the server pod already exists. 
TODO: make this controllable by callers if err != nil { if apierrors.IsAlreadyExists(err) { @@ -424,7 +424,7 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix }) } podsNamespacer := client.CoreV1().Pods(config.Namespace) - clientPod, err := podsNamespacer.Create(context.TODO(), clientPod) + clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{}) if err != nil { return nil, err } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index ac015b712f4..553ba62e215 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -120,7 +120,7 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c } defer CleanupAdapter(f.Namespace.Name, adapterDeployment) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } @@ -168,7 +168,7 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, } defer CleanupAdapter(f.Namespace.Name, AdapterForOldResourceModel) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } @@ -269,10 +269,10 @@ func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { } func createSDExporterPods(f *framework.Framework, cs clientset.Interface) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue)) + pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue), metav1.CreateOptions{}) if err != nil { return nil, err } - _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue)) + _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue), metav1.CreateOptions{}) return pod, err } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 85cc658441d..f72a42c44f4 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -208,7 +208,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := 
f.WaitForPodReady(pod.Name); err != nil { @@ -256,7 +256,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -293,7 +293,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -357,7 +357,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index 853e824fbb3..d4d5ca27179 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -57,7 +57,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) @@ -72,7 +72,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID @@ -89,14 +89,14 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { updatedKubeConfig, err := randBytes(20) framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap, metav1.UpdateOptions{}) framework.ExpectNoError(err) defer func() { ginkgo.By("update back the cluster-info 
ConfigMap") cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap, metav1.UpdateOptions{}) framework.ExpectNoError(err) }() @@ -110,7 +110,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap secret be signed") diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index dd021872862..7eaeeb3bb68 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -57,7 +57,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(-time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -74,7 +74,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 6bdb65a0f2c..664e8c2c860 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -138,7 +138,7 @@ var _ = SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { ginkgo.By("deleting the test headless service") @@ -148,7 +148,7 @@ var _ = SIGDescribe("DNS", func() { regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) defer func() { @@ -193,7 +193,7 @@ var _ = SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := 
e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { ginkgo.By("deleting the test headless service") @@ -203,7 +203,7 @@ var _ = SIGDescribe("DNS", func() { regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) defer func() { ginkgo.By("deleting the test service") @@ -250,7 +250,7 @@ var _ = SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) defer func() { @@ -292,7 +292,7 @@ var _ = SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) defer func() { @@ -331,7 +331,7 @@ var _ = SIGDescribe("DNS", func() { ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := e2eservice.CreateServiceSpec(serviceName, "foo.example.com", false, nil) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName) defer func() { @@ -414,7 +414,7 @@ var _ = SIGDescribe("DNS", func() { Nameservers: []string{testServerIP}, Searches: []string{testSearchPath}, } - testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod) + testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name) framework.Logf("Created pod %v", testAgnhostPod) defer func() { @@ -463,7 +463,7 @@ var _ = SIGDescribe("DNS", func() { testServerPod := generateDNSServerPod(map[string]string{ testDNSNameFull: testInjectedIP, }) - testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod) + testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), 
testServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name) framework.Logf("Created pod %v", testServerPod) defer func() { @@ -495,7 +495,7 @@ var _ = SIGDescribe("DNS", func() { }, }, } - testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod) + testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name) framework.Logf("Created pod %v", testUtilsPod) defer func() { diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 00c75833cae..dfef0b3f226 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -157,11 +157,11 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { if len(cmList.Items) == 0 { ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(context.TODO(), cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(context.TODO(), cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) } else { ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(context.TODO(), cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) } } @@ -219,7 +219,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { } var err error - t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.utilPod) + t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.utilPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod) framework.Logf("Created pod %v", t.utilPod) err = t.f.WaitForPodRunning(t.utilPod.Name) @@ -245,7 +245,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { }, } - t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(context.TODO(), t.utilService) + t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(context.TODO(), t.utilService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) framework.Logf("Created service %v", t.utilService) } @@ -312,7 +312,7 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) { t.dnsServerPod = pod var err error - t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.dnsServerPod) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.dnsServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod) framework.Logf("Created pod %v", t.dnsServerPod) err = t.f.WaitForPodRunning(t.dnsServerPod.Name) @@ -563,7 +563,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) defer ginkgo.GinkgoRecover() podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) }() - if _, err := podClient.Create(context.TODO(), pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } @@ -591,7 +591,7 @@ func validateTargetedProbeOutput(f 
*framework.Framework, pod *v1.Pod, fileNames
defer ginkgo.GinkgoRecover()
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
}()
- if _, err := podClient.Create(context.TODO(), pod); err != nil {
+ if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go
index c937c590089..7f49bf69edc 100644
--- a/test/e2e/network/dns_configmap.go
+++ b/test/e2e/network/dns_configmap.go
@@ -22,6 +22,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -413,12 +414,12 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
f := t.f
serviceName := "dns-externalname-upstream-test"
externalNameService := e2eservice.CreateServiceSpec(serviceName, googleDNSHostname, false, nil)
- if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService); err != nil {
+ if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
}
serviceNameLocal := "dns-externalname-upstream-local"
externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
- if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal); err != nil {
+ if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
}
defer func() {
diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go
index 32eff31c6e0..45f086c4755 100644
--- a/test/e2e/network/dual_stack.go
+++ b/test/e2e/network/dual_stack.go
@@ -190,10 +190,10 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
},
}
- serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), serverDeploymentSpec)
+ serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), serverDeploymentSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
- clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec)
+ clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(cs, serverDeployment)
diff --git a/test/e2e/network/endpointslice.go b/test/e2e/network/endpointslice.go
index 4aa0b0d0ffe..eb70dd34735 100644
--- a/test/e2e/network/endpointslice.go
+++ b/test/e2e/network/endpointslice.go
@@ -428,7 +428,7 @@ func ensurePodTargetRef(pod *v1.Pod, targetRef *v1.ObjectReference) {
// createServiceReportErr creates a Service and reports any associated error.
func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service {
- svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service)
+ svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err)
return svc
}
diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go
index fa809e2088b..426fa5527ca 100644
--- a/test/e2e/network/firewall.go
+++ b/test/e2e/network/firewall.go
@@ -23,6 +23,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@@ -151,7 +152,7 @@ var _ = SIGDescribe("Firewall rule", func() {
pod.ObjectMeta.Labels = jig.Labels
pod.Spec.NodeName = nodeName
pod.Spec.HostNetwork = true
- _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod)
+ _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(f.WaitForPodReady(podName))
framework.Logf("Netexec pod %q in namespace %q running", podName, ns)
diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go
index e53e854a1d5..d53ad50011c 100644
--- a/test/e2e/network/fixture.go
+++ b/test/e2e/network/fixture.go
@@ -85,7 +85,7 @@ func (t *TestFixture) BuildServiceSpec() *v1.Service {
// CreateRC creates a replication controller and records it for cleanup.
func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
- rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(context.TODO(), rc)
+ rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{})
if err == nil {
t.rcs[rc.Name] = true
}
@@ -94,7 +94,7 @@ func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationCon
// CreateService creates a service, and record it for cleanup
func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) {
- result, err := t.Client.CoreV1().Services(t.Namespace).Create(context.TODO(), service)
+ result, err := t.Client.CoreV1().Services(t.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
if err == nil {
t.services[service.Name] = true
}
@@ -126,7 +126,7 @@ func (t *TestFixture) Cleanup() []error {
}
x := int32(0)
old.Spec.Replicas = &x
- if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(context.TODO(), old); err != nil {
+ if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(context.TODO(), old, metav1.UpdateOptions{}); err != nil {
if apierrors.IsNotFound(err) {
return nil
}
diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index 694ed1e7ce1..2586f15ada4 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -300,7 +300,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
- _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc)
+ _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
@@ -318,7 +318,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
- _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc)
+ _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
@@ -354,7 +354,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if scale.Spec.Replicas != int32(num) {
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(num)
- _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale)
+ _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
@@ -405,7 +405,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas)
- _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale)
+ _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{})
framework.ExpectNoError(err)
err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
@@ -423,7 +423,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// trigger by changing graceful termination period to 60 seconds
gracePeriod := int64(60)
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
- _, err = f.ClientSet.AppsV1().Deployments(ns).Update(context.TODO(), deploy)
+ _, err = f.ClientSet.AppsV1().Deployments(ns).Update(context.TODO(), deploy, metav1.UpdateOptions{})
framework.ExpectNoError(err)
err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
@@ -454,7 +454,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if scale.Spec.Replicas != int32(num) {
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(num)
- _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale)
+ _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
@@ -542,7 +542,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
- _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc)
+ _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
detectNegAnnotation(f, jig, gceController, ns, name, 2)
@@ -553,7 +553,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
- _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc)
+ _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
detectNegAnnotation(f, jig, gceController, ns, name, 2)
@@ -564,7 +564,7 @@ var _ =
SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 1) @@ -577,7 +577,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { delete(svc.Annotations, ingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 0) diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index 0fb9a0affdc..635b4198193 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -268,7 +268,7 @@ var _ = SIGDescribe("Network", func() { }, }, } - _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), serverPod) + _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), serverPod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodsRunningReady(fr.ClientSet, fr.Namespace.Name, 1, 0, framework.PodReadyBeforeTimeout, map[string]string{}) @@ -290,7 +290,7 @@ var _ = SIGDescribe("Network", func() { }, }, } - _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(context.TODO(), svc) + _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Server service created") @@ -325,7 +325,7 @@ var _ = SIGDescribe("Network", func() { RestartPolicy: v1.RestartPolicyNever, }, } - _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), pod) + _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Client pod created") diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index 3388bd6bf65..d6eae58a6bc 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -88,7 +88,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -141,7 +141,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -194,7 +194,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) 
framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -228,7 +228,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -280,7 +280,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -326,7 +326,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -372,7 +372,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -439,7 +439,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policy.") defer cleanupNetworkPolicy(f, policy) @@ -478,7 +478,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -508,7 +508,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -532,7 +532,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2) + policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy2) @@ -555,7 +555,7 @@ var _ = SIGDescribe("NetworkPolicy 
[LinuxOnly]", func() { Ingress: []networkingv1.NetworkPolicyIngressRule{{}}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -585,7 +585,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -632,7 +632,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -670,7 +670,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -714,7 +714,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) testCanConnect(f, f.Namespace, "client-a", service, clientAAllowedPort) @@ -760,7 +760,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -806,7 +806,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -816,7 +816,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { framework.ExpectNoError(err, "Error getting Namespace %v: %v", nsB.ObjectMeta.Name, err) nsB.ObjectMeta.Labels["ns-name"] = newNsBName - nsB, err = f.ClientSet.CoreV1().Namespaces().Update(context.TODO(), nsB) + nsB, err = f.ClientSet.CoreV1().Namespaces().Update(context.TODO(), nsB, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Error updating Namespace %v: %v", nsB.ObjectMeta.Name, err) testCanConnect(f, nsB, "client-b", 
service, allowedPort) @@ -848,7 +848,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -943,7 +943,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB) + policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.") defer cleanupNetworkPolicy(f, policyAllowToServerInNSB) @@ -986,7 +986,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB) + policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.") defer cleanupNetworkPolicy(f, policyAllowOnlyFromClientB) @@ -1013,7 +1013,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll) + policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.") defer cleanupNetworkPolicy(f, policyIngressAllowAll) @@ -1079,7 +1079,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, }, } - policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA) + policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.") defer cleanupNetworkPolicy(f, policyAllowOnlyToServerA) @@ -1105,7 +1105,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll) + policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.") defer cleanupNetworkPolicy(f, policyEgressAllowAll) @@ -1132,7 +1132,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyAll) + policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyAll, 
metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyAll.") ginkgo.By("Creating client-a which should not be able to contact the server.", func() { @@ -1166,7 +1166,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowFromClientA) + policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowFromClientA, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowFromClientA.") ginkgo.By("Creating client-a which should be able to contact the server.", func() { @@ -1256,7 +1256,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR) + policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.") defer cleanupNetworkPolicy(f, policyAllowCIDR) @@ -1335,7 +1335,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB) + policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.") defer cleanupNetworkPolicy(f, policyAllowToPodB) @@ -1358,7 +1358,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB) + policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.") defer cleanupNetworkPolicy(f, policyDenyFromPodB) @@ -1542,7 +1542,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, Containers: containers, RestartPolicy: v1.RestartPolicyNever, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Created pod %v", pod.ObjectMeta.Name) @@ -1558,7 +1558,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, "pod-name": podName, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Created service %s", svc.Name) @@ -1602,7 +1602,7 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod }, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) return pod @@ -1623,7 +1623,7 @@ func updateNetworkClientPodLabel(f *framework.Framework, namespace *v1.Namespace payloadBytes, err := json.Marshal(payload) framework.ExpectNoError(err) - pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, payloadBytes) + pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{}) framework.ExpectNoError(err) return pod diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 
bafd589b4ec..90d1c8a2310 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -69,7 +69,7 @@ func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host str }, } podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := podClient.Create(context.TODO(), pod) + _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return err } diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index 730edca1ca0..0f8d94a3f55 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -151,7 +151,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { // target Pod at Node and feed Pod Node's InternalIP pod := newTestPod(node.Name, inIP) - _, err = pc.Create(context.TODO(), pod) + _, err = pc.Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) } @@ -172,7 +172,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { framework.ExpectNoError(err) proxyNodeIP := extIP + ":" + strconv.Itoa(testProxyPort) - _, err = pc.Create(context.TODO(), newTestProxyPod(node.Name)) + _, err = pc.Create(context.TODO(), newTestProxyPod(node.Name), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running") diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index d9dec789610..77cc6241d11 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -116,7 +116,7 @@ var _ = SIGDescribe("Proxy", func() { }, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) // Make an RC with a single pod. The 'porter' image is diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index b022f421caa..27f716facc7 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -169,7 +169,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { testDeploy := generateScaleTestBackendDeploymentSpec(scaleTestNumBackends) f.Logger.Infof("Creating deployment %s...", testDeploy.Name) - testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(context.TODO(), testDeploy) + testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(context.TODO(), testDeploy, metav1.CreateOptions{}) if err != nil { errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err)) return errs @@ -279,7 +279,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { } addTestPathToIngress(ingToUpdate) start = time.Now() - ingToUpdate, err = f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ingToUpdate) + ingToUpdate, err = f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ingToUpdate, metav1.UpdateOptions{}) if err != nil { errs = append(errs, err) return @@ -369,11 +369,11 @@ func addTestPathToIngress(ing *networkingv1beta1.Ingress) { } func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *networkingv1beta1.Ingress, error) { - svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(context.TODO(), generateScaleTestServiceSpec(suffix)) + svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(context.TODO(), generateScaleTestServiceSpec(suffix), metav1.CreateOptions{}) if err != nil { return nil, nil, err } - ingCreated, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Create(context.TODO(), 
generateScaleTestIngressSpec(suffix, enableTLS)) + ingCreated, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Create(context.TODO(), generateScaleTestIngressSpec(suffix, enableTLS), metav1.CreateOptions{}) if err != nil { return nil, nil, err } diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index 31719d2e629..0f229fc16fc 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -136,7 +136,7 @@ func main() { }, } klog.Infof("Creating namespace %s...", ns.Name) - if _, err := cs.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil { + if _, err := cs.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil { klog.Errorf("Failed to create namespace %s: %v", ns.Name, err) testSuccessFlag = false return diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 78463773715..254f3fdeee3 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -200,7 +200,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string podNames := make([]string, replicas) name := svc.ObjectMeta.Name ginkgo.By("creating service " + name + " in namespace " + ns) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), svc) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return podNames, "", err } @@ -898,7 +898,7 @@ var _ = SIGDescribe("Services", func() { serverPodName := "echo-sourceip" pod := f.NewAgnhostPod(serverPodName, "netexec", "--http-port", strconv.Itoa(servicePort)) pod.Labels = jig.Labels - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod) + _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodReady(pod.Name)) defer func() { @@ -956,7 +956,7 @@ var _ = SIGDescribe("Services", func() { serverPodName := "hairpin" podTemplate := f.NewAgnhostPod(serverPodName, "netexec", "--http-port", strconv.Itoa(servicePort)) podTemplate.Labels = jig.Labels - pod, err := cs.CoreV1().Pods(ns).Create(context.TODO(), podTemplate) + pod, err := cs.CoreV1().Pods(ns).Create(context.TODO(), podTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodReady(pod.Name)) @@ -3178,7 +3178,7 @@ func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas }, } - deployment, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), pauseDeployment) + deployment, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), pauseDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error in creating deployment for pause pod") return deployment } @@ -3205,7 +3205,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s }, }, } - _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace %s", name, ns) } @@ -3213,7 +3213,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s // until it's Running func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { hostExecPod := e2epod.NewExecPodSpec(ns, name, true) - pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod) + pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod, 
metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(client, pod) framework.ExpectNoError(err) diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index f02f9d929de..8c14c57c403 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -345,7 +345,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie }, } startTime := time.Now() - gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc) + gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { return 0, err } diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index 7c282c629c4..6112aa98fbf 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -72,7 +72,7 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("deleting the pod") podClient.Delete(context.TODO(), pod.Name, nil) }() - if _, err := podClient.Create(context.TODO(), pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Failed to create pod: %v", err) } diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index 368cf2fbcc6..e78d2621a69 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -169,7 +169,7 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, }, }, } - rtnPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + rtnPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = f.WaitForPodReady(rtnPod.Name) // running & ready diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index f7799cca7e6..4b63235d980 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" } pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil { framework.Failf("err failing pod: %v", err) } @@ -100,5 +100,5 @@ func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) { SchedulerName: "please don't schedule my pods", }, } - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) } diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 2b762a3e53f..c805c8e6b72 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -60,7 +60,7 @@ func testPreStop(c clientset.Interface, ns string) { }, } ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) - podDescr, err := c.CoreV1().Pods(ns).Create(context.TODO(), podDescr) + podDescr, err := c.CoreV1().Pods(ns).Create(context.TODO(), podDescr, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. 
@@ -105,7 +105,7 @@ func testPreStop(c clientset.Interface, ns string) { } ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) - preStopDescr, err = c.CoreV1().Pods(ns).Create(context.TODO(), preStopDescr) + preStopDescr, err = c.CoreV1().Pods(ns).Create(context.TODO(), preStopDescr, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index 7f51e401eb7..a25f718f93f 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -47,14 +47,14 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass) + rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := newRuntimeClassPod(rc.GetName()) pod.Spec.NodeSelector = map[string]string{ "foo": "bar", } - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err, "should be forbidden") framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") }) @@ -98,7 +98,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { ginkgo.By("Trying to create runtimeclass and pod") runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass) + rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := newRuntimeClassPod(rc.GetName()) diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index f2733b03c29..569bd0aec81 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -207,7 +207,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.Containers[0].Command = []string{"sleep", "6000"} client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - pod, err := client.Create(context.TODO(), pod) + pod, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod %v", pod) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) @@ -263,7 +263,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{ Level: "s0:c2,c3", } - _, err = client.Create(context.TODO(), pod) + _, err = client.Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod %v", pod) err = f.WaitForPodRunning(pod.Name) diff --git a/test/e2e/node/ttlafterfinished.go b/test/e2e/node/ttlafterfinished.go index 7f655574c3e..814c1e69d13 100644 --- a/test/e2e/node/ttlafterfinished.go +++ b/test/e2e/node/ttlafterfinished.go @@ -125,7 +125,7 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(job) - if job, err = jobs.Update(context.TODO(), job); err == nil { + if job, err = jobs.Update(context.TODO(), job, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating job %s", name) return true, nil } diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index 3da4504ddbf..19c3991111a 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -102,7 +102,7 @@ var _ = SIGDescribe("LimitRange", func() { defer w.Stop() ginkgo.By("Submitting a LimitRange") - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), limitRange) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), limitRange, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange creation was observed") @@ -130,7 +130,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with no resource requirements") pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange") @@ -147,7 +147,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with partial resource requirements") pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", "")) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange") @@ -168,18 +168,18 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Failing to create a Pod with less than min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Updating a LimitRange") newMin := getResourceList("9m", "49Mi", "49Gi") limitRange.Spec.Limits[0].Min = newMin - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(context.TODO(), limitRange) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(context.TODO(), limitRange, metav1.UpdateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange updating is effective") @@ -192,12 +192,12 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with less than former min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, 
metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Deleting a LimitRange") @@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with more than former max resources") pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) }) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 20bea26fced..098924fee39 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -139,7 +139,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra ds, err := framework.DsFromManifest(dsYamlURL) framework.ExpectNoError(err) ds.Namespace = f.Namespace.Name - _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds) + _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") framework.Logf("Successfully created daemonset to install Nvidia drivers.") diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index d0e3bc9a7bc..edfc80c4762 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -663,7 +663,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf)) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) framework.ExpectNoError(err) return pod } @@ -718,7 +718,7 @@ func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTain // createPausePodAction returns a closure that creates a pause pod upon invocation. 
func createPausePodAction(f *framework.Framework, conf pausePodConfig) e2eevents.Action { return func() error { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), initPausePod(f, conf)) + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) return err } } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index aaa95da3079..1756eff4abf 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -82,7 +82,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeList = &v1.NodeList{} var err error for _, pair := range priorityPairs { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } @@ -305,7 +305,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // force it to update nodeCopy.ResourceVersion = "0" delete(nodeCopy.Status.Capacity, fakecpu) - _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy) + _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{}) framework.ExpectNoError(err) } for _, pair := range priorityPairs { @@ -339,7 +339,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // force it to update nodeCopy.ResourceVersion = "0" nodeCopy.Status.Capacity[fakecpu] = resource.MustParse("1000") - node, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy) + node, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{}) framework.ExpectNoError(err) // create four PriorityClass: p1, p2, p3, p4 @@ -347,7 +347,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { priorityName := fmt.Sprintf("p%d", i) priorityVal := int32(i) priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal}) - _, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) + _, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}, metav1.CreateOptions{}) if err != nil { framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) framework.Logf("Reason: %v. 
Msg: %v", apierrors.ReasonForError(err), err) @@ -527,7 +527,7 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe if len(namespace) == 0 { namespace = f.Namespace.Name } - rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(context.TODO(), initPauseRS(f, conf)) + rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(context.TODO(), initPauseRS(f, conf), metav1.CreateOptions{}) framework.ExpectNoError(err) return rs } @@ -543,7 +543,7 @@ func createPod(f *framework.Framework, conf pausePodConfig) *v1.Pod { if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf)) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) framework.ExpectNoError(err) return pod } diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 1aa7e09f32a..629af641fff 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -84,7 +84,7 @@ func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods node.Annotations = make(map[string]string) } node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData) - _, err = c.CoreV1().Nodes().Update(context.TODO(), node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { if !apierrors.IsConflict(err) { framework.ExpectNoError(err) @@ -113,7 +113,7 @@ func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) { return true, nil } delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey) - _, err = c.CoreV1().Nodes().Update(context.TODO(), node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { if !apierrors.IsConflict(err) { framework.ExpectNoError(err) @@ -481,7 +481,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string, }, }, } - rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rc) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rc, metav1.CreateOptions{}) framework.ExpectNoError(err) return rc } diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index f94713f0e47..e4dfe7f9090 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -82,7 +82,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }}, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), serviceSpec) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) // Now create some pods behind the service @@ -207,7 +207,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar }, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) // Cleanup the replication controller when we are done. 
defer func() { diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 6f4f1a7a3a9..462e4230229 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -236,7 +236,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) ginkgo.By("Creating pods for each static PV") for _, config := range configs { podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), podConfig) + config.pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), podConfig, metav1.CreateOptions{}) framework.ExpectNoError(err) } diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index de61908b80e..e6a544e752d 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -289,5 +289,5 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }) func createPodPreset(c clientset.Interface, ns string, job *settingsv1alpha1.PodPreset) (*settingsv1alpha1.PodPreset, error) { - return c.SettingsV1alpha1().PodPresets(ns).Create(context.TODO(), job) + return c.SettingsV1alpha1().PodPresets(ns).Create(context.TODO(), job, metav1.CreateOptions{}) } diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 393db398f65..fa4f98ac276 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -642,7 +642,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e var err error _, err = cs.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) if err != nil { - class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class) + class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create class : %v", err) } @@ -651,7 +651,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e StorageClassName: &(class.Name), VolumeMode: &t.VolumeMode, }, ns) - claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) + claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create claim: %v", err) pvcClaims := []*v1.PersistentVolumeClaim{claim} @@ -727,7 +727,7 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum pod.Spec.NodeSelector = node.Selector } - return cs.CoreV1().Pods(ns).Create(context.TODO(), pod) + return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } // checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes) diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index 13ed30b3497..457f7eedd6a 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -85,7 +85,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { clientPod := getFlexVolumePod(volumeSource, node.Name) ginkgo.By("Creating pod that uses slow format volume") - pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), clientPod) + pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), clientPod, metav1.CreateOptions{}) framework.ExpectNoError(err) uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs) diff --git 
a/test/e2e/storage/drivers/csi_objects.go b/test/e2e/storage/drivers/csi_objects.go index f70ee46b56f..5dbc95ee274 100644 --- a/test/e2e/storage/drivers/csi_objects.go +++ b/test/e2e/storage/drivers/csi_objects.go @@ -93,7 +93,7 @@ func createGCESecrets(client clientset.Interface, ns string) { }, } - _, err = client.CoreV1().Secrets(ns).Create(context.TODO(), s) + _, err = client.CoreV1().Secrets(ns).Create(context.TODO(), s, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName()) } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 9bb819cc080..fa5107ea69a 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -506,7 +506,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestC Type: "kubernetes.io/rbd", } - secret, err := cs.CoreV1().Secrets(config.Namespace).Create(context.TODO(), secret) + secret, err := cs.CoreV1().Secrets(config.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create secrets for Ceph RBD: %v", err) } @@ -944,7 +944,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v }, } // h.prepPod will be reused in cleanupDriver. - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating hostPath init pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) @@ -966,7 +966,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() { cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath) v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating hostPath teardown pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 382e0d91a28..19ef8039550 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -78,7 +78,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { } var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { }, } - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -253,7 +253,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle }, } - if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), gitServerSvc); err != nil { + if 
gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), gitServerSvc, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } @@ -303,7 +303,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { "data-1": "value-1", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) } return @@ -398,7 +398,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume }, }, } - _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc) + _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "error creating replication controller") defer func() { diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index ba0b810530f..482bbc2676d 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { for _, testSource := range invalidEphemeralSource("pod-ephm-test") { ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) - pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) // Allow it to sleep for 30 seconds diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index d1f82ced048..9b2d273771d 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { Provisioner: "flex-expand", } - resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), metav1.CreateOptions{}) if err != nil { fmt.Printf("storage class creation error: %v\n", err) } @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { StorageClassName: &(resizableSc.Name), ClaimSize: "2Gi", }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pvc") }) diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index a91d6326859..5436fca1a04 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -84,7 +84,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa Provisioner: "flex-expand", } - resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), 
metav1.CreateOptions{}) if err != nil { fmt.Printf("storage class creation error: %v\n", err) } @@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa StorageClassName: &(resizableSc.Name), ClaimSize: "2Gi", }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pvc: %v", err) }) @@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa // createNginxPod creates an nginx pod. func createNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) { pod := makeNginxPod(namespace, nodeSelector, pvclaims) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index e4eb34e0c6e..dde4b384909 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -21,6 +21,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -97,7 +98,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pvc") pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 99599a72a35..c1e28cbe35e 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { AllowVolumeExpansion: true, DelayBinding: true, } - resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating resizable storage class") framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true) @@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { StorageClassName: &(resizableSc.Name), VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pvc") }) diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index d197fbafe06..f95f081a26e 100644 --- 
a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -303,7 +303,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv. pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "") pod.Spec.NodeName = nodeName framework.Logf("Creating NFS client pod.") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName) framework.ExpectNoError(err) defer func() { diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index a81d196ef51..b3a04a297d6 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -146,7 +146,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { // if all test pods are RO then need a RW pod to format pd ginkgo.By("creating RW fmt Pod to ensure PD is formatted") fmtPod = testPDPod([]string{diskName}, host0Name, false, 1) - _, err = podClient.Create(context.TODO(), fmtPod) + _, err = podClient.Create(context.TODO(), fmtPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create fmtPod") framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name)) @@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { }() ginkgo.By("creating host0Pod on node0") - _, err = podClient.Create(context.TODO(), host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name) @@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { } ginkgo.By("creating host1Pod on node1") - _, err = podClient.Create(context.TODO(), host1Pod) + _, err = podClient.Create(context.TODO(), host1Pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create host1Pod") framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name)) framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name) @@ -280,7 +280,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { framework.Logf("PD Read/Writer Iteration #%v", i) ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers)) host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers) - _, err = podClient.Create(context.TODO(), host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) @@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { targetNode.ObjectMeta.SetResourceVersion("0") // need to set the resource version or else the Create() fails ginkgo.By("defer: re-create host0 node object") - _, err := nodeClient.Create(context.TODO(), targetNode) + _, err := nodeClient.Create(context.TODO(), targetNode, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name)) } ginkgo.By("defer: verify the number of ready nodes") @@ -382,7 +382,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { }() ginkgo.By("creating host0Pod on node0") - _, err = podClient.Create(context.TODO(), host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed 
to create host0Pod: %v", err)) ginkgo.By("waiting for host0Pod to be running") framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 2684f93a139..f2bc0fd8a93 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -326,7 +326,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { testVol := testVols[0] pod := makeLocalPodWithNodeName(config, testVol, config.nodes[1].Name) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) @@ -552,7 +552,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } pod := e2epod.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) pods[pod.Name] = pod numCreated++ @@ -646,7 +646,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count)) for i := 0; i < count; i++ { pod := e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) pods[pod.Name] = pod } @@ -697,7 +697,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp testVol := testVols[0] pod := makeLocalPodFunc(config, testVol, nodeName) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace) @@ -791,7 +791,7 @@ func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMod VolumeBindingMode: mode, } - _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc) + _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) framework.ExpectNoError(err) } @@ -1155,7 +1155,7 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in spec.Spec.PodManagementPolicy = appsv1.ParallelPodManagement } - ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec) + ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss) diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 85f314e99a7..48068a88908 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -283,7 +283,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { 
ginkgo.By("Writing to the volume.") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) @@ -301,7 +301,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.By("Verifying the mount has been cleaned.") mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) @@ -353,7 +353,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { } spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, 1, ss) @@ -373,7 +373,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { validateCmd += "&& sleep 10000" spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe) - ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec) + ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, 1, ss) }) @@ -435,7 +435,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) { framework.Logf("Creating nfs test pod") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command) - runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("pod Create API error: %v", err) } diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 7fbe76e75d4..f742c77d920 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { // make the pv definitions pv = e2epv.MakePersistentVolume(pvConfig) // create the PV - pv, err = client.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err = client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PV") ginkgo.By("Waiting for PV to enter phase Available") @@ -106,7 +106,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() { ginkgo.By("Creating a PVC") pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PVC") ginkgo.By("Waiting for PVC to become Bound") diff 
--git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 213f7c6b0f7..9c2ff9b41a8 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ClaimSize: t.ClaimSize, VolumeMode: &t.VolumeMode, }, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PVC") pvcCreatedAndNotDeleted = true diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 00a82befde9..3b87d27dc27 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -184,7 +184,7 @@ func testZonalFailover(c clientset.Interface, ns string) { statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) ginkgo.By("creating a StorageClass " + class.Name) - _, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) + _, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) @@ -193,9 +193,9 @@ func testZonalFailover(c clientset.Interface, ns string) { }() ginkgo.By("creating a StatefulSet") - _, err = c.CoreV1().Services(ns).Create(context.TODO(), service) + _, err = c.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet) + _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { @@ -308,13 +308,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) framework.ExpectNoError(err) reversePatches[node.Name] = reversePatchBytes - _, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err) } return func() { for nodeName, reversePatch := range reversePatches { - _, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, reversePatch) + _, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, reversePatch, metav1.PatchOptions{}) framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index 7a30a3d7b92..c927e661d54 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -38,13 +38,13 @@ var _ = utils.SIGDescribe("Subpath", func() { ginkgo.BeforeEach(func() { ginkgo.By("Setting up data") secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating secret") } configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}} - _, err = 
f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating configmap") } diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index b14a8525e8d..479a7c323d7 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -227,7 +227,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern test ginkgo.By("creating a StorageClass " + r.Sc.Name) - r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc) + r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err) if r.Sc != nil { diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index 7f20b774fb8..c52f02a3b1d 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -322,7 +322,7 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri }) } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") return pod } @@ -365,7 +365,7 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) { }, } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) switch { case err == nil: diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index a98b6b907d4..72efe79d296 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -246,7 +246,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { if class != nil { framework.ExpectEqual(*claim.Spec.StorageClassName, class.Name) ginkgo.By("creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) // The "should provision storage with snapshot data source" test already has created the class. 
// TODO: make class creation optional and remove the IsAlreadyExists exception framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) @@ -259,7 +259,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { } ginkgo.By("creating a claim") - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) @@ -466,7 +466,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P namespace := claims[0].Namespace ginkgo.By("creating a storage class " + t.Class.Name) - class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class) + class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { err = deleteStorageClass(t.Client, class.Name) @@ -477,7 +477,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { - c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) + c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) claimNames = append(claimNames, c.Name) createdClaims = append(createdClaims, c) framework.ExpectNoError(err) @@ -598,7 +598,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command }, } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod: %v", err) return pod } @@ -639,12 +639,12 @@ func prepareSnapshotDataSourceForProvisioning( var err error if class != nil { ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) } ginkgo.By("[Initialize dataSource]creating a initClaim") - updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim) + updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{}) framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). @@ -712,12 +712,12 @@ func preparePVCDataSourceForProvisioning( var err error if class != nil { ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) } ginkgo.By("[Initialize dataSource]creating a source PVC") - sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source) + sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{}) framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). 
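Note: every hunk in this series applies the same mechanical migration — the typed client-go methods now take a context.Context plus an options struct (metav1.CreateOptions, metav1.UpdateOptions, or metav1.PatchOptions). For reference, a minimal sketch of the resulting call-site shape; the package and helper names below are hypothetical, and the empty options structs preserve the previous behavior (tests can populate fields such as DryRun when needed):

    package migrationexample // hypothetical package, for illustration only

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        clientset "k8s.io/client-go/kubernetes"
    )

    // createPod shows the new Create signature: context first, options struct last.
    func createPod(cs clientset.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
        return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
    }

    // updatePVC shows the equivalent Update form with metav1.UpdateOptions.
    func updatePVC(cs clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
        return cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{})
    }

    // patchNode shows Patch, which now also carries metav1.PatchOptions.
    func patchNode(cs clientset.Interface, name string, patch []byte) (*v1.Node, error) {
        return cs.CoreV1().Nodes().Patch(context.TODO(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
    }
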
diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 9c62fb34a87..847806829ff 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -131,7 +131,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) ginkgo.By("creating a StorageClass " + class.Name) - class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class) + class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) @@ -139,7 +139,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt }() ginkgo.By("creating a claim") - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 7de6bdbc79b..7606ac4376e 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -448,7 +448,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) removeUnusedContainers(l.pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") defer func() { ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) @@ -734,7 +734,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTermi func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") defer func() { e2epod.DeletePodWithWait(f.ClientSet, pod) @@ -817,7 +817,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { // Start pod ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") defer func() { e2epod.DeletePodWithWait(f.ClientSet, pod) @@ -919,7 +919,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) @@ -950,7 +950,7 @@ 
func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, func formatVolume(f *framework.Framework, pod *v1.Pod) { ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating volume init pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index da87021ebfd..26dad5c1428 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -325,11 +325,11 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT framework.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.Sc, l.resource.Pvc) ginkgo.By("Creating sc") - l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), l.resource.Sc) + l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), l.resource.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating pvc") - l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(context.TODO(), l.resource.Pvc) + l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(context.TODO(), l.resource.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating pod") @@ -343,7 +343,7 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT e2epv.SELinuxLabel, nil) l.pod.Spec.Affinity = affinity - l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod) + l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod, metav1.CreateOptions{}) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index bbf2c1dcf18..f3e725ba030 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -278,7 +278,7 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c } updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(context.TODO(), updatedPVC) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(context.TODO(), updatedPVC, metav1.UpdateOptions{}) if err != nil { framework.Logf("Error updating pvc %s: %v", pvcName, err) lastUpdateError = err diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 299de6b2a04..07412bff3de 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -317,7 +317,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. 
ginkgo.By(fmt.Sprintf("starting %s", clientPod.Name)) podsNamespacer := cs.CoreV1().Pods(config.Namespace) - clientPod, err = podsNamespacer.Create(context.TODO(), clientPod) + clientPod, err = podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 98f6fca6110..f8f9f39b2ad 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -164,7 +164,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte ClaimSize: claimSize, StorageClassName: &l.resource.Sc.Name, }, l.ns.Name) - pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc) + pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) l.pvcs = append(l.pvcs, pvc) } @@ -175,7 +175,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte selection := e2epod.NodeSelection{} e2epod.SetAffinity(&selection, nodeName) pod.Spec.Affinity = selection.Affinity - l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) + l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for all PVCs to get Bound") @@ -191,7 +191,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte // Use affinity to schedule everything on the right node e2epod.SetAffinity(&selection, nodeName) pod.Spec.Affinity = selection.Affinity - l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) + l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit") ginkgo.By("Waiting for the pod to get unschedulable with the right message") diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 648445638be..2c6a4633f5e 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -198,16 +198,16 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(context.TODO(), l.Pv) + l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(context.TODO(), l.Pv, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pv") // Prebind pv l.Pvc.Spec.VolumeName = l.Pv.Name - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pvc") framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc") @@ -216,7 +216,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern pod := e2epod.MakeSecPod(l.ns.Name, 
[]*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil) // Setting node pod.Spec.NodeName = l.config.ClientNodeName - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod") defer func() { framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") @@ -252,11 +252,11 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pvc") eventSelector := fields.Set{ @@ -297,7 +297,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern pod = swapVolumeMode(pod) // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod") defer func() { framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") @@ -348,7 +348,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod)) diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index 2a339490cd7..3483559451b 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -398,7 +398,7 @@ func (*serviceAccountFactory) Create(f *framework.Framework, i interface{}) (fun return nil, errorItemNotSupported } client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create ServiceAccount") } return func() error { @@ -420,7 +420,7 @@ func (*clusterRoleFactory) Create(f *framework.Framework, i interface{}) (func() framework.Logf("Define cluster role %v", item.GetName()) client := f.ClientSet.RbacV1().ClusterRoles() - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create ClusterRole") } return func() error { @@ -441,7 +441,7 @@ func (*clusterRoleBindingFactory) Create(f *framework.Framework, i interface{}) } client := f.ClientSet.RbacV1().ClusterRoleBindings() - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create ClusterRoleBinding") } return func() error { @@ -462,7 +462,7 @@ func (*roleFactory) Create(f *framework.Framework, i 
interface{}) (func() error, } client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create Role") } return func() error { @@ -483,7 +483,7 @@ func (*roleBindingFactory) Create(f *framework.Framework, i interface{}) (func() } client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create RoleBinding") } return func() error { @@ -504,7 +504,7 @@ func (*serviceFactory) Create(f *framework.Framework, i interface{}) (func() err } client := f.ClientSet.CoreV1().Services(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create Service") } return func() error { @@ -525,7 +525,7 @@ func (*statefulSetFactory) Create(f *framework.Framework, i interface{}) (func() } client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create StatefulSet") } return func() error { @@ -546,7 +546,7 @@ func (*daemonSetFactory) Create(f *framework.Framework, i interface{}) (func() e } client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create DaemonSet") } return func() error { @@ -567,7 +567,7 @@ func (*storageClassFactory) Create(f *framework.Framework, i interface{}) (func( } client := f.ClientSet.StorageV1().StorageClasses() - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create StorageClass") } return func() error { @@ -588,7 +588,7 @@ func (*csiDriverFactory) Create(f *framework.Framework, i interface{}) (func() e } client := f.ClientSet.StorageV1beta1().CSIDrivers() - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create CSIDriver") } return func() error { @@ -609,7 +609,7 @@ func (*secretFactory) Create(f *framework.Framework, i interface{}) (func() erro } client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName()) - if _, err := client.Create(context.TODO(), item); err != nil { + if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { return nil, errors.Wrap(err, "create Secret") } return func() error { diff --git a/test/e2e/storage/utils/host_exec.go b/test/e2e/storage/utils/host_exec.go index 74ba1cd7265..395065b43e1 100644 --- a/test/e2e/storage/utils/host_exec.go +++ b/test/e2e/storage/utils/host_exec.go @@ -21,6 +21,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/exec" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -98,7 +99,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) 
*v1.Pod { return &privileged }(true), } - pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod) + pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(cs, pod) framework.ExpectNoError(err) diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index a2e86d85d0c..8c4c4dd2858 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -466,7 +466,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) { }, }, } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { e2epod.DeletePodOrFail(c, ns, pod.Name) @@ -536,7 +536,7 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa }, }, } - provisionerPod, err := podClient.Create(context.TODO(), provisionerPod) + provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) @@ -590,7 +590,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, continue } - _, err = roleBindingClient.Create(context.TODO(), binding) + _, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) } diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 6d2bccd3a19..cf1e789b7f0 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -117,14 +117,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { storageOpMetrics := getControllerStorageMetrics(controllerMetrics, pluginName) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) @@ -172,11 +172,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { "invalidparam": "invalidvalue", }, } - _, err = c.StorageV1().StorageClasses().Create(context.TODO(), invalidSc) + _, err = c.StorageV1().StorageClasses().Create(context.TODO(), invalidSc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating new storageclass: %v", err) pvc.Spec.StorageClassName = &invalidSc.Name - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) framework.ExpectNotEqual(pvc, nil) @@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.By("Creating a pod and expecting it to fail") pod := e2epod.MakePod(ns, nil, claims, false, "") 
- pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) @@ -204,13 +204,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create volume metrics with the correct PVC ref", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) @@ -261,13 +261,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) @@ -292,13 +292,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create volume metrics in Volume Manager", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) @@ -322,7 +322,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) @@ -336,7 +336,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } // Create pod - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, "Error starting pod ", pod.Name) diff --git a/test/e2e/storage/volume_provisioning.go 
b/test/e2e/storage/volume_provisioning.go index cb180dcad44..2595de595c6 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -359,7 +359,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning") class := newBetaStorageClass(*betaTest, "beta") // we need to create the class manually, testDynamicProvisioning does not accept beta class - class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class) + class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer deleteStorageClass(c, class.Name) @@ -455,7 +455,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", } sc := newStorageClass(test, ns, suffix) - sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc) + sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) framework.ExpectNoError(err) defer deleteStorageClass(c, sc.Name) @@ -465,7 +465,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &sc.Name, VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) @@ -495,7 +495,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } class := newStorageClass(test, ns, "race") - class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) + class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer deleteStorageClass(c, class.Name) @@ -567,7 +567,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, } } - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get Released") @@ -582,7 +582,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete - pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv) + pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get deleted") @@ -615,7 +615,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Resources: []string{"endpoints"}, Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, }}, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create leader-locking role") err = auth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject) @@ -691,7 +691,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { 
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns)) @@ -728,7 +728,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns)) @@ -786,7 +786,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("creating a StorageClass") suffix := fmt.Sprintf("invalid-aws") class := newStorageClass(test, ns, suffix) - class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) + class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) @@ -799,7 +799,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &class.Name, VolumeMode: &test.VolumeMode, }, ns) - claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) + claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) @@ -865,7 +865,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr } - _, err = c.StorageV1().StorageClasses().Update(context.TODO(), sc) + _, err = c.StorageV1().StorageClasses().Update(context.TODO(), sc, metav1.UpdateOptions{}) framework.ExpectNoError(err) expectedDefault := false @@ -996,7 +996,7 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod { }, }, } - provisionerPod, err := podClient.Create(context.TODO(), provisionerPod) + provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 11afb9a7365..28c0442378b 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -65,7 +65,7 @@ var _ = utils.SIGDescribe("Volumes", func() { "third": "this is the third file", }, } - if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(context.TODO(), configMap); err != nil { + if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configmap: %v", err) } defer func() { diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index e710989e629..8a956dae91c 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -187,12 +187,12 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { ginkgo.By("Creating the PV for same volume path") pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) 
framework.ExpectNoError(err) ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the pv and pvc to bind") @@ -213,13 +213,13 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No } ginkgo.By("creating the pv") pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) return } diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index f6749c21eea..77401aa53cd 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -22,6 +22,7 @@ import ( "github.com/onsi/ginkgo" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -116,21 +117,21 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ginkgo.By("creating the pv with label volume-type:ssd") pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) - pvSsd, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pvSsd) + pvSsd, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pvSsd, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:vvol") pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) - pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcVvol) + pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcVvol, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:ssd") pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) - pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcSsd) + pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcSsd, metav1.CreateOptions{}) return } diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 7ab49528007..3624487851b 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { case storageclass4: scParams[Datastore] = datastoreName } - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") defer client.StorageV1().StorageClasses().Delete(context.TODO(), 
scname, nil) diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 3ddce9fe371..7eaf381c00a 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -76,7 +76,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { scParameters := make(map[string]string) scParameters["diskformat"] = "thin" scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") - sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec) + sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index c89358a2911..c7644112166 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -89,23 +89,23 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec, metav1.CreateOptions{}) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 3596e348f79..b9651ac5c63 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) ginkgo.By("Creating pod") - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) + pod, err := 
client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 6f328579819..34f89f28bc2 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With Invalid Datastore") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index ed88557348e..e23c3b57c1d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -110,14 +110,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating Storage Class With DiskFormat") storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { @@ -143,7 +143,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podSpec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be running") diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index 62c5e1fa50e..ca0b1a19882 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { expectedDiskSize := "1Mi" ginkgo.By("Creating 
Storage Class") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 10548df60c5..c305046aff3 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -151,12 +151,12 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa } func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 7763f37c65f..e978849c447 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -115,7 +115,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) defer e2epod.DeletePodWithWait(client, pod) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index ece310c2003..7f99f8d3ac9 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", ginkgo.It("verify volume status after node power off", func() { ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class 
with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 3cdb6a3b075..e351ddcfdd8 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -26,6 +26,7 @@ import ( "github.com/onsi/gomega" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -91,7 +92,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { ginkgo.By("Creating Storage Class") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" - storageclass, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("thinsc", scParameters, nil, "")) + storageclass, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("thinsc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating PVCs using the Storage Class") diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index cbd24b48cee..37eecee6193 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -25,6 +25,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -134,23 +135,23 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec) + sc, err = 
client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec, metav1.CreateOptions{}) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 859dee9e8ea..7222e9e072c 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -25,6 +25,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -362,7 +363,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) - pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) + pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 1c9759bd063..f6d39166c01 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -121,7 +121,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 6ca8cd1932d..6570b90a8bc 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -259,7 +259,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -291,7 +291,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := 
client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -311,7 +311,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index 8cb072add27..270af5f7c98 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -376,7 +376,7 @@ var _ = utils.SIGDescribe("Zone Support", func() { }) func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -418,7 +418,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin } func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -432,7 +432,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I ginkgo.By("Creating a pod") pod := e2epod.MakePod(namespace, nil, pvclaims, false, "") - pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod) + pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod, 
metav1.CreateOptions{}) framework.ExpectNoError(err) defer e2epod.DeletePodWithWait(client, pod) @@ -460,7 +460,7 @@ func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist } func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -479,7 +479,7 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node } func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) @@ -500,7 +500,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara } func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 18a76520a96..832ac1f1c0b 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -80,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a DaemonSet") var err error - if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(context.TODO(), t.daemonSet); err != nil { + if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(context.TODO(), t.daemonSet, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) } diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go index f76659fab2c..d9ce080b32d 100644 --- a/test/e2e/upgrades/apps/deployments.go +++ b/test/e2e/upgrades/apps/deployments.go @@ -67,7 +67,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) d := 
e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType) - deployment, err := deploymentClient.Create(context.TODO(), d) + deployment, err := deploymentClient.Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go index bdc0e791a34..76ea1d49e0b 100644 --- a/test/e2e/upgrades/apps/replicasets.go +++ b/test/e2e/upgrades/apps/replicasets.go @@ -59,7 +59,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns)) replicaSet := newReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage) - rs, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), replicaSet) + rs, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index a8bfd362955..cd47dfe480c 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -85,12 +85,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { e2esset.PauseNewPods(t.set) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) - _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service) + _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(t.set.Spec.Replicas) = 3 - _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), t.set) + _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), t.set, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + t.set.Name) diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go index 112ffd945f1..c69e1587ef2 100644 --- a/test/e2e/upgrades/configmaps.go +++ b/test/e2e/upgrades/configmaps.go @@ -58,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a ConfigMap") var err error - if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), t.configMap); err != nil { + if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), t.configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) } diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go index 2919b9463d1..fa776f3b099 100644 --- a/test/e2e/upgrades/secrets.go +++ b/test/e2e/upgrades/secrets.go @@ -56,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a secret") var err error - if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), t.secret); err != nil { + if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), t.secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) } diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index 3a68a1ef5fa..1594122d65b 100644 --- 
a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("DNS", func() { Nameservers: []string{testInjectedIP}, Searches: []string{testSearchPath}, } - testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod) + testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Created pod %v", testUtilsPod) defer func() { diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index ed250af805f..2fbbc4ede8a 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -316,7 +316,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, func(), error) { f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), roleName, &metav1.DeleteOptions{}) } - _, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role) + _, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{}) if err != nil { err = errors.Wrapf(err, "unable to create RBAC cluster role %q", roleName) } @@ -333,7 +333,7 @@ func createServiceAccount(f *framework.Framework) string { Namespace: f.Namespace.Name, }, } - if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), account); err != nil { + if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), account, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create service account %q: %v", accountName, err) } return accountName @@ -359,7 +359,7 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb Name: rbacRoleName, }, } - f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding) + f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{}) } // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name. 
diff --git a/test/e2e_kubeadm/util.go b/test/e2e_kubeadm/util.go index 3facc5f65f6..94bde52745b 100644 --- a/test/e2e_kubeadm/util.go +++ b/test/e2e_kubeadm/util.go @@ -171,7 +171,7 @@ func ExpectSubjectHasAccessToResource(c clientset.Interface, subjectKind, subjec framework.Failf("invalid subjectKind %s", subjectKind) } - s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar) + s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) framework.ExpectNoError(err, "error getting SubjectAccessReview for %s %s to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) gomega.Expect(s.Status.Allowed).Should(gomega.BeTrue(), "%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 9ad110c75df..0553caa76e1 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -125,7 +125,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.Logf("env %v", dp.Spec.Containers[0].Env) dp.Spec.NodeName = framework.TestContext.NodeName ginkgo.By("Create sample device plugin pod") - devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) + devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for devices to become available on the local node") @@ -212,7 +212,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.Logf("Trying to get dp pod after deletion. err must be non-nil. err: %v", err) framework.ExpectError(err) - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{}) framework.ExpectNoError(err) ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -258,7 +258,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.ExpectEqual(devIDRestart2, devID2) ginkgo.By("Re-register resources") - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node") diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 4edf5053887..ea089db565b 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -142,7 +142,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // fail to parse, we insert some bogus stuff into the configMap @@ -152,14 +152,14 @@ var _ = 
framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam "kubelet": "{0xdeadbeef}", }, } - failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failParseConfigMap) + failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failParseConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // fail to validate, we make a copy of correct and set an invalid KubeAPIQPS on kc before serializing invalidKC := correctKC.DeepCopy() invalidKC.KubeAPIQPS = -1 failValidateConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-fail-validate", invalidKC) - failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failValidateConfigMap) + failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failValidateConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) correctSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC) - lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap) + lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // bad config map, we insert some bogus stuff into the configMap @@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam "kubelet": "{0xdeadbeef}", }, } - badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), badConfigMap) + badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), badConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -373,7 +373,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam lkgKC := beforeKC.DeepCopy() combinedConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-combined", lkgKC) combinedConfigMap.Data[badConfigKey] = "{0xdeadbeef}" - combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), combinedConfigMap) + combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), combinedConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -426,7 +426,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap1 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-1", lkgKC) - lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap1) + lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap1, metav1.CreateOptions{}) framework.ExpectNoError(err) lkgSource1 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -439,7 +439,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam lkgStatus1.ConfigMap.ResourceVersion = 
lkgConfigMap1.ResourceVersion lkgConfigMap2 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-2", lkgKC) - lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap2) + lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap2, metav1.CreateOptions{}) framework.ExpectNoError(err) lkgSource2 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -491,14 +491,14 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we just create two configmaps with the same config but different names and toggle between them kc1 := beforeKC.DeepCopy() cm1 := newKubeletConfigMap("dynamic-kubelet-config-test-cm1", kc1) - cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm1) + cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm1, metav1.CreateOptions{}) framework.ExpectNoError(err) // slightly change the config kc2 := kc1.DeepCopy() kc2.EventRecordQPS = kc1.EventRecordQPS + 1 cm2 := newKubeletConfigMap("dynamic-kubelet-config-test-cm2", kc2) - cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm2) + cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm2, metav1.CreateOptions{}) framework.ExpectNoError(err) cm1Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -547,7 +547,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // we reuse the same name, namespace @@ -627,7 +627,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place-lkg", lkgKC) - lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap) + lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // bad config map, we insert some bogus stuff into the configMap @@ -706,7 +706,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // we reuse the same name, namespace @@ -786,7 +786,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() 
correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err) // ensure node config source is set to the config map we will mutate in-place, @@ -904,7 +904,7 @@ func updateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { // (with respect to concurrency control) when you omit ResourceVersion. // We know that we won't perform concurrent updates during this test. tc.configMap.ResourceVersion = "" - cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Update(context.TODO(), tc.configMap) + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Update(context.TODO(), tc.configMap, metav1.UpdateOptions{}) if err != nil { return err } @@ -936,7 +936,7 @@ func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { // to match the created configMap func createConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { tc.configMap.ResourceVersion = "" - cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap) + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap, metav1.CreateOptions{}) if err != nil { return err } diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 3a29d8e4b8c..022379bea85 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -303,7 +303,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { @@ -360,7 +360,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { @@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := 
f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go index f5df2d2dc3d..d7dc0b072c4 100644 --- a/test/e2e_node/gpu_device_plugin_test.go +++ b/test/e2e_node/gpu_device_plugin_test.go @@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi } ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE") - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), NVIDIADevicePlugin()) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), NVIDIADevicePlugin(), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for GPUs to become available on the local node") diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 809c304805f..2c69fb8f117 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete _, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: configName}, Data: map[string]string{path.Base(configFile): config}, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Create the node problem detector") hostPathType := new(v1.HostPathType) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 73721bb7bad..f02ad16201b 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -247,7 +247,7 @@ func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) er node.Spec.ConfigSource = source // update to the new source - _, err = nodeclient.Update(context.TODO(), node) + _, err = nodeclient.Update(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { return err } @@ -316,7 +316,7 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er // creates a configmap containing kubeCfg in kube-system namespace func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) { cmap := newKubeletConfigMap("testcfg", internalKC) - cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cmap) + cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cmap, metav1.CreateOptions{}) if err != nil { return nil, err } diff --git a/test/integration/apimachinery/watch_restart_test.go b/test/integration/apimachinery/watch_restart_test.go index c9eda16af41..8c9e1dd0952 100644 --- a/test/integration/apimachinery/watch_restart_test.go +++ b/test/integration/apimachinery/watch_restart_test.go @@ -119,7 +119,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { counter = counter + 1 patch := fmt.Sprintf(`{"metadata": {"annotations": {"count": "%d"}}}`, counter) - _, err := c.CoreV1().Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.StrategicMergePatchType, []byte(patch)) + _, err := c.CoreV1().Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) if err != nil { t.Fatalf("Failed to 
patch secret: %v", err) } @@ -212,7 +212,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { t.Fatalf("Failed to create clientset: %v", err) } - secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(context.TODO(), tc.secret) + secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(context.TODO(), tc.secret, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create testing secret %s/%s: %v", tc.secret.Namespace, tc.secret.Name, err) } diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index d1f450aaaff..a4afe1e4ef3 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -459,7 +459,7 @@ func testWebhookAdmission(t *testing.T, watchCache bool) { // create CRDs etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...) - if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -1478,7 +1478,7 @@ func createV1beta1ValidationWebhook(client clientset.Interface, endpoint, conver AdmissionReviewVersions: []string{"v1beta1"}, }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -1514,7 +1514,7 @@ func createV1beta1MutationWebhook(client clientset.Interface, endpoint, converte AdmissionReviewVersions: []string{"v1beta1"}, }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -1553,7 +1553,7 @@ func createV1ValidationWebhook(client clientset.Interface, endpoint, convertedEn SideEffects: &none, }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -1592,7 +1592,7 @@ func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndp SideEffects: &none, }, }, - }) + }, metav1.CreateOptions{}) return err } diff --git a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go index 5b10b5efa11..43b83f39cb2 100644 --- a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go +++ b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go @@ -54,13 +54,13 @@ func TestBrokenWebhook(t *testing.T) { } t.Logf("Creating Deployment to ensure apiserver is functional") - _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(0))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(0)), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create deployment: %v", err) } t.Logf("Creating Broken Webhook that will block all operations on all objects") - _, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), brokenWebhookConfig(brokenWebhookName)) + _, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), brokenWebhookConfig(brokenWebhookName), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to register broken webhook: %v", err) } @@ -72,7 +72,7 @@ func TestBrokenWebhook(t *testing.T) { // test whether the webhook blocks requests t.Logf("Attempt to create Deployment which should fail due to the webhook") - _, err = 
client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(1))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(1)), metav1.CreateOptions{}) if err == nil { t.Fatalf("Expected the broken webhook to cause creating a deployment to fail, but it succeeded.") } @@ -90,7 +90,7 @@ func TestBrokenWebhook(t *testing.T) { // test whether the webhook still blocks requests after restarting t.Logf("Attempt again to create Deployment which should fail due to the webhook") - _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(2))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(2)), metav1.CreateOptions{}) if err == nil { t.Fatalf("Expected the broken webhook to cause creating a deployment to fail, but it succeeded.") } @@ -106,7 +106,7 @@ func TestBrokenWebhook(t *testing.T) { // test if the deleted webhook no longer blocks requests t.Logf("Creating Deployment to ensure webhook is deleted") - _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(3))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(3)), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create deployment: %v", err) } diff --git a/test/integration/apiserver/admissionwebhook/client_auth_test.go b/test/integration/apiserver/admissionwebhook/client_auth_test.go index e3be06ece3e..030e6d63f28 100644 --- a/test/integration/apiserver/admissionwebhook/client_auth_test.go +++ b/test/integration/apiserver/admissionwebhook/client_auth_test.go @@ -153,14 +153,14 @@ plugins: t.Fatalf("unexpected error: %v", err) } - _, err = client.CoreV1().Pods("default").Create(context.TODO(), clientAuthMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), clientAuthMarkerFixture, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } upCh := recorder.Reset() ns := "load-balance" - _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -181,7 +181,7 @@ plugins: FailurePolicy: &fail, AdmissionReviewVersions: []string{"v1beta1"}, }}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -194,7 +194,7 @@ plugins: // wait until new webhook is called if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(context.TODO(), clientAuthMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), clientAuthMarkerFixture.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{}) if t.Failed() { return true, nil } diff --git a/test/integration/apiserver/admissionwebhook/load_balance_test.go b/test/integration/apiserver/admissionwebhook/load_balance_test.go index ca9d6fe8158..96d68131584 100644 --- a/test/integration/apiserver/admissionwebhook/load_balance_test.go +++ b/test/integration/apiserver/admissionwebhook/load_balance_test.go @@ -102,14 +102,14 @@ func TestWebhookLoadBalance(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - _, err = 
client.CoreV1().Pods("default").Create(context.TODO(), loadBalanceMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), loadBalanceMarkerFixture, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } upCh := recorder.Reset() ns := "load-balance" - _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -130,7 +130,7 @@ func TestWebhookLoadBalance(t *testing.T) { FailurePolicy: &fail, AdmissionReviewVersions: []string{"v1beta1"}, }}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -143,7 +143,7 @@ func TestWebhookLoadBalance(t *testing.T) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(context.TODO(), loadBalanceMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), loadBalanceMarkerFixture.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{}) select { case <-upCh: return true, nil @@ -176,7 +176,7 @@ func TestWebhookLoadBalance(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod()) + _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod(), metav1.CreateOptions{}) if err != nil { t.Error(err) } @@ -195,7 +195,7 @@ func TestWebhookLoadBalance(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod()) + _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod(), metav1.CreateOptions{}) if err != nil { t.Error(err) } diff --git a/test/integration/apiserver/admissionwebhook/reinvocation_test.go b/test/integration/apiserver/admissionwebhook/reinvocation_test.go index ce325fe242f..fa53c5cf0f6 100644 --- a/test/integration/apiserver/admissionwebhook/reinvocation_test.go +++ b/test/integration/apiserver/admissionwebhook/reinvocation_test.go @@ -308,7 +308,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { } for priorityClass, priority := range map[string]int{"low-priority": 1, "high-priority": 10} { - _, err = client.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityClass}, Value: int32(priority)}) + _, err = client.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityClass}, Value: int32(priority)}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -320,7 +320,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { testCaseID := strconv.Itoa(i) ns := "reinvoke-" + testCaseID nsLabels := map[string]string{"test-case": testCaseID} - _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -328,13 +328,13 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { // Write markers to a separate namespace to avoid cross-talk markerNs := ns + "-markers" markerNsLabels := 
map[string]string{"test-markers": testCaseID} - _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } // Create a maker object to use to check for the webhook configurations to be ready. - marker, err := client.CoreV1().Pods(markerNs).Create(context.TODO(), newReinvocationMarkerFixture(markerNs)) + marker, err := client.CoreV1().Pods(markerNs).Create(context.TODO(), newReinvocationMarkerFixture(markerNs), metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -381,7 +381,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { cfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: webhooks, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -394,7 +394,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods(markerNs).Patch(context.TODO(), marker.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods(markerNs).Patch(context.TODO(), marker.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{}) select { case <-upCh: return true, nil @@ -422,7 +422,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { if tt.initialPriorityClass != "" { pod.Spec.PriorityClassName = tt.initialPriorityClass } - obj, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod) + obj, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) if tt.expectError { if err == nil { diff --git a/test/integration/apiserver/admissionwebhook/timeout_test.go b/test/integration/apiserver/admissionwebhook/timeout_test.go index 62912ef7c9c..255bded03a3 100644 --- a/test/integration/apiserver/admissionwebhook/timeout_test.go +++ b/test/integration/apiserver/admissionwebhook/timeout_test.go @@ -175,7 +175,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatalf("unexpected error: %v", err) } - _, err = client.CoreV1().Pods("default").Create(context.TODO(), timeoutMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), timeoutMarkerFixture, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -184,7 +184,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Run(tt.name, func(t *testing.T) { upCh := recorder.Reset() ns := fmt.Sprintf("reinvoke-%d", i) - _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -212,7 +212,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: mutatingWebhooks, - }) + }, 
metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -246,7 +246,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { validatingCfg, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: validatingWebhooks, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -259,7 +259,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(context.TODO(), timeoutMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), timeoutMarkerFixture.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{}) select { case <-upCh: return true, nil diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 737f2a3b8cb..41e1434052c 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -227,7 +227,7 @@ func Test202StatusCode(t *testing.T) { // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions. // Verify that server returns 200 in this case. - rs, err := rsClient.Create(context.TODO(), newRS(ns.Name)) + rs, err := rsClient.Create(context.TODO(), newRS(ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -237,7 +237,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the apiserver still returns 200 since DeleteOptions.OrphanDependents is not set. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs) + rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -246,7 +246,7 @@ func Test202StatusCode(t *testing.T) { // 3. Create the resource and then delete it with DeleteOptions.OrphanDependents=false. // Verify that the server still returns 200 since the resource is immediately deleted. rs = newRS(ns.Name) - rs, err = rsClient.Create(context.TODO(), rs) + rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -256,7 +256,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the server returns 202 in this case. 
rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs) + rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -293,7 +293,7 @@ func TestListResourceVersion0(t *testing.T) { for i := 0; i < 10; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -332,7 +332,7 @@ func TestAPIListChunking(t *testing.T) { for i := 0; i < 4; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -353,7 +353,7 @@ func TestAPIListChunking(t *testing.T) { if calls == 2 { rs := newRS(ns.Name) rs.Name = "test-5" - if _, err := rsClient.Create(context.TODO(), rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -407,11 +407,11 @@ func TestNameInFieldSelector(t *testing.T) { ns := framework.CreateTestingNamespace(fmt.Sprintf("ns%d", i), s, t) defer framework.DeleteTestingNamespace(ns, s, t) - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("foo")) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("foo"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } - _, err = clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("bar")) + _, err = clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("bar"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } @@ -534,7 +534,7 @@ func TestMetadataClient(t *testing.T) { name: "list, get, patch, and delete via metadata client", want: func(t *testing.T) { ns := "metadata-builtin" - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } @@ -674,11 +674,11 @@ func TestMetadataClient(t *testing.T) { name: "watch via metadata client", want: func(t *testing.T) { ns := "metadata-watch" - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(ns).Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := 
clientset.CoreV1().Services(ns).Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } @@ -1136,11 +1136,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1154,11 +1154,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1172,11 +1172,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, 
[]byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1196,11 +1196,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1213,11 +1213,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1392,11 +1392,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, 
types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1410,11 +1410,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1428,11 +1428,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1452,11 +1452,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, 
[]byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1469,11 +1469,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index 1d701e7f012..005ad090213 100644 --- a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -51,7 +51,7 @@ func TestMaxJSONPatchOperations(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret) + _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index 7e1c9cedffc..b236a01f90a 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -58,7 +58,7 @@ func TestMaxResourceSize(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret) + _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index 85b334dc5f6..a12fcbb2823 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -64,7 +64,7 @@ func TestPatchConflicts(t *testing.T) { } // Create the object we're going to conflict on - clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret) + clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) client := clientSet.CoreV1().RESTClient() successes := int32(0) diff --git a/test/integration/apiserver/podlogs/podlogs_test.go b/test/integration/apiserver/podlogs/podlogs_test.go index 64bde0824fb..46a072ca337 100644 --- a/test/integration/apiserver/podlogs/podlogs_test.go +++ b/test/integration/apiserver/podlogs/podlogs_test.go @@ -84,7 +84,7 @@ func TestInsecurePodLogs(t *testing.T) { node, err := clientSet.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ 
ObjectMeta: metav1.ObjectMeta{Name: "fake"}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -101,21 +101,21 @@ func TestInsecurePodLogs(t *testing.T) { }, }, } - node, err = clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), node) + node, err = clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } _, err = clientSet.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "ns"}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } _, err = clientSet.CoreV1().ServiceAccounts("ns").Create(context.TODO(), &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "ns"}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -133,7 +133,7 @@ func TestInsecurePodLogs(t *testing.T) { NodeName: node.Name, AutomountServiceAccountToken: &falseRef, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/auth/accessreview_test.go b/test/integration/auth/accessreview_test.go index 2be729a5f6e..e5b8db9b571 100644 --- a/test/integration/auth/accessreview_test.go +++ b/test/integration/auth/accessreview_test.go @@ -123,7 +123,7 @@ func TestSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), test.sar) + response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -207,7 +207,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { for _, test := range tests { username = test.username - response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), test.sar) + response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -325,7 +325,7 @@ func TestLocalSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(context.TODO(), test.sar) + response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(context.TODO(), test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 50e7deb8d08..2d15af82d0b 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -96,20 +96,20 @@ func TestNodeAuthorizer(t *testing.T) { } // Create objects - if _, err := superuserClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}); err != nil { + if _, err := superuserClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil { + if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: 
metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil { + if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil { + if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}); err != nil { + if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } pvName := "mypv" @@ -120,7 +120,7 @@ func TestNodeAuthorizer(t *testing.T) { Source: storagev1.VolumeAttachmentSource{PersistentVolumeName: &pvName}, NodeName: "node2", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } if _, err := superuserClient.CoreV1().PersistentVolumeClaims("ns").Create(context.TODO(), &corev1.PersistentVolumeClaim{ @@ -129,7 +129,7 @@ func TestNodeAuthorizer(t *testing.T) { AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("1")}}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -141,7 +141,7 @@ func TestNodeAuthorizer(t *testing.T) { ClaimRef: &corev1.ObjectReference{Namespace: "ns", Name: "mypvc"}, PersistentVolumeSource: corev1.PersistentVolumeSource{AzureFile: &corev1.AzureFilePersistentVolumeSource{ShareName: "default", SecretName: "mypvsecret"}}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -201,7 +201,7 @@ func TestNodeAuthorizer(t *testing.T) { {Name: "pvc", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}}, }, }, - }) + }, metav1.CreateOptions{}) return err } } @@ -211,7 +211,7 @@ func TestNodeAuthorizer(t *testing.T) { _, err := client.CoreV1().Pods("ns").UpdateStatus(context.TODO(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, Status: corev1.PodStatus{StartTime: &startTime}, - }) + }, metav1.UpdateOptions{}) return err } } @@ -233,7 +233,7 @@ func TestNodeAuthorizer(t *testing.T) { NodeName: "node2", Containers: []corev1.Container{{Name: "image", Image: "busybox"}}, }, - }) + }, metav1.CreateOptions{}) return err } } @@ -246,7 +246,7 @@ func TestNodeAuthorizer(t *testing.T) { createNode2 := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) + _, err := client.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}, metav1.CreateOptions{}) return err } } @@ -263,7 +263,7 @@ func TestNodeAuthorizer(t *testing.T) { KubeletConfigKey: "kubelet", }, } - _, err = client.CoreV1().Nodes().Update(context.TODO(), node2) + _, err = client.CoreV1().Nodes().Update(context.TODO(), node2, metav1.UpdateOptions{}) return err } } @@ -274,7 +274,7 @@ func TestNodeAuthorizer(t 
*testing.T) { return err } node2.Spec.ConfigSource = nil - _, err = client.CoreV1().Nodes().Update(context.TODO(), node2) + _, err = client.CoreV1().Nodes().Update(context.TODO(), node2, metav1.UpdateOptions{}) return err } } @@ -283,7 +283,7 @@ func TestNodeAuthorizer(t *testing.T) { _, err := client.CoreV1().Nodes().UpdateStatus(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: corev1.NodeStatus{}, - }) + }, metav1.UpdateOptions{}) return err } } @@ -331,7 +331,7 @@ func TestNodeAuthorizer(t *testing.T) { capacity++ statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) patchBytes := []byte(statusString) - _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, "status") + _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } } @@ -339,7 +339,7 @@ func TestNodeAuthorizer(t *testing.T) { updatePVCPhase := func(client clientset.Interface) func() error { return func() error { patchBytes := []byte(`{"status":{"phase": "Bound"}}`) - _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, "status") + _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } } @@ -363,7 +363,7 @@ func TestNodeAuthorizer(t *testing.T) { RenewTime: &metav1.MicroTime{Time: time.Now()}, }, } - _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Create(context.TODO(), lease) + _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Create(context.TODO(), lease, metav1.CreateOptions{}) return err } } @@ -374,7 +374,7 @@ func TestNodeAuthorizer(t *testing.T) { return err } lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} - _, err = client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Update(context.TODO(), lease) + _, err = client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Update(context.TODO(), lease, metav1.UpdateOptions{}) return err } } @@ -382,7 +382,7 @@ func TestNodeAuthorizer(t *testing.T) { return func() error { node1LeaseDurationSeconds++ bs := []byte(fmt.Sprintf(`{"spec": {"leaseDurationSeconds": %d}}`, node1LeaseDurationSeconds)) - _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Patch(context.TODO(), "node1", types.StrategicMergePatchType, bs) + _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Patch(context.TODO(), "node1", types.StrategicMergePatchType, bs, metav1.PatchOptions{}) return err } } @@ -414,7 +414,7 @@ func TestNodeAuthorizer(t *testing.T) { }, }, } - _, err := client.StorageV1().CSINodes().Create(context.TODO(), nodeInfo) + _, err := client.StorageV1().CSINodes().Create(context.TODO(), nodeInfo, metav1.CreateOptions{}) return err } } @@ -431,7 +431,7 @@ func TestNodeAuthorizer(t *testing.T) { TopologyKeys: []string{"com.example.csi/rack"}, }, } - _, err = client.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) + _, err = client.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err } } @@ -439,7 +439,7 @@ func TestNodeAuthorizer(t *testing.T) { return func() error { bs := []byte(fmt.Sprintf(`{"csiDrivers": [ { "driver": "net.example.storage.driver2", "nodeID": "net.example.storage/node1", "topologyKeys": [ 
"net.example.storage/region" ] } ] }`)) // StrategicMergePatch is unsupported by CRs. Falling back to MergePatch - _, err := client.StorageV1().CSINodes().Patch(context.TODO(), "node1", types.MergePatchType, bs) + _, err := client.StorageV1().CSINodes().Patch(context.TODO(), "node1", types.MergePatchType, bs, metav1.PatchOptions{}) return err } } diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index c443ce043cf..e39bcb6e32a 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -127,25 +127,25 @@ type bootstrapRoles struct { // client should be authenticated as the RBAC super user. func (b bootstrapRoles) bootstrap(client clientset.Interface) error { for _, r := range b.clusterRoles { - _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), &r) + _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), &r, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.roles { - _, err := client.RbacV1().Roles(r.Namespace).Create(context.TODO(), &r) + _, err := client.RbacV1().Roles(r.Namespace).Create(context.TODO(), &r, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.clusterRoleBindings { - _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &r) + _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &r, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.roleBindings { - _, err := client.RbacV1().RoleBindings(r.Namespace).Create(context.TODO(), &r) + _, err := client.RbacV1().RoleBindings(r.Namespace).Create(context.TODO(), &r, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to make request: %v", err) } @@ -728,7 +728,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { APIGroup: "rbac.authorization.k8s.io", }, } - if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), discRoleBinding); err != nil { + if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), discRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:discovery` ClusterRoleBinding: %v", err) } t.Logf("Modifying default `system:basic-user` ClusterRoleBinding") @@ -738,7 +738,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { } basicUserRoleBinding.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "false" basicUserRoleBinding.Annotations["rbac-discovery-upgrade-test"] = "pass" - if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), basicUserRoleBinding); err != nil { + if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), basicUserRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err) } t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding") diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index e090b1e152e..a50cedd1375 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -162,13 +162,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := 
cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, delSvcAcct := createDeleteSvcAcct(t, cs, sa) defer delSvcAcct() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -200,13 +200,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp) } pod, delPod := createDeletePod(t, cs, pod) @@ -214,17 +214,17 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = pod.UID - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp) } // no uid treq.Spec.BoundObjectRef.UID = noUID - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -263,13 +263,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant secret but got: %#v", resp) } secret, delSecret := createDeleteSecret(t, cs, secret) @@ -277,17 +277,17 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = secret.UID - if _, err := 
cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp) } // no uid treq.Spec.BoundObjectRef.UID = noUID - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -321,7 +321,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { _, del = createDeletePod(t, cs, otherpod) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err but got: %#v", resp) } }) @@ -336,7 +336,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -375,7 +375,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -391,7 +391,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -419,7 +419,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelPod() treq.Spec.BoundObjectRef.UID = originalPod.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } @@ -460,7 +460,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } @@ -503,7 +503,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() 
treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } @@ -547,7 +547,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } @@ -576,7 +576,7 @@ func doTokenReview(t *testing.T, cs clientset.Interface, treq *authenticationv1. Spec: authenticationv1.TokenReviewSpec{ Token: treq.Status.Token, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -643,7 +643,7 @@ func getPayload(t *testing.T, b string) string { func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAccount) (*v1.ServiceAccount, func()) { t.Helper() - sa, err := cs.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa) + sa, err := cs.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -662,7 +662,7 @@ func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAcc func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod, func()) { t.Helper() - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -681,7 +681,7 @@ func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod func createDeleteSecret(t *testing.T, cs clientset.Interface, sec *v1.Secret) (*v1.Secret, func()) { t.Helper() - sec, err := cs.CoreV1().Secrets(sec.Namespace).Create(context.TODO(), sec) + sec, err := cs.CoreV1().Secrets(sec.Namespace).Create(context.TODO(), sec, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/test/integration/client/client_test.go b/test/integration/client/client_test.go index d6b792effaa..987bae8396e 100644 --- a/test/integration/client/client_test.go +++ b/test/integration/client/client_test.go @@ -82,14 +82,14 @@ func TestClient(t *testing.T) { }, } - got, err := client.CoreV1().Pods("default").Create(context.TODO(), pod) + got, err := client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{}) if err == nil { t.Fatalf("unexpected non-error: %v", got) } // get a created pod pod.Spec.Containers[0].Image = "an-image" - got, err = client.CoreV1().Pods("default").Create(context.TODO(), pod) + got, err = client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -151,7 +151,7 @@ func TestAtomicPut(t *testing.T) { }, } rcs := c.CoreV1().ReplicationControllers("default") - rc, err := rcs.Create(context.TODO(), &rcBody) + rc, err := rcs.Create(context.TODO(), &rcBody, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed creating atomicRC: %v", err) } @@ -180,7 +180,7 @@ func TestAtomicPut(t *testing.T) { tmpRC.Spec.Selector[l] = v tmpRC.Spec.Template.Labels[l] = v } - _, err = rcs.Update(context.TODO(), tmpRC) + 
_, err = rcs.Update(context.TODO(), tmpRC, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { // This is what we expect. @@ -227,7 +227,7 @@ func TestPatch(t *testing.T) { }, } pods := c.CoreV1().Pods("default") - _, err := pods.Create(context.TODO(), &podBody) + _, err := pods.Create(context.TODO(), &podBody, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed creating patchpods: %v", err) } @@ -357,7 +357,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) { } // Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version - createdEndpoint, err := c.CoreV1().Endpoints("default").Update(context.TODO(), endpointTemplate) + createdEndpoint, err := c.CoreV1().Endpoints("default").Update(context.TODO(), endpointTemplate, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed creating endpoint: %v", err) } @@ -476,7 +476,7 @@ func TestSingleWatch(t *testing.T) { rv1 := "" for i := 0; i < 10; i++ { event := mkEvent(i) - got, err := client.CoreV1().Events("default").Create(context.TODO(), event) + got, err := client.CoreV1().Events("default").Create(context.TODO(), event, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed creating event %#q: %v", event, err) } @@ -580,7 +580,7 @@ func TestMultiWatch(t *testing.T) { Image: imageutils.GetPauseImageName(), }}, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("Couldn't make %v: %v", name, err) @@ -639,7 +639,7 @@ func TestMultiWatch(t *testing.T) { if !ok { return } - if _, err := client.CoreV1().Events("default").Create(context.TODO(), dummyEvent(i)); err != nil { + if _, err := client.CoreV1().Events("default").Create(context.TODO(), dummyEvent(i), metav1.CreateOptions{}); err != nil { panic(fmt.Sprintf("couldn't make an event: %v", err)) } changeMade <- i @@ -686,7 +686,7 @@ func TestMultiWatch(t *testing.T) { Image: imageutils.GetPauseImageName(), }}, }, - }) + }, metav1.CreateOptions{}) if err != nil { panic(fmt.Sprintf("couldn't make unrelated pod: %v", err)) @@ -716,7 +716,7 @@ func TestMultiWatch(t *testing.T) { } pod.Spec.Containers[0].Image = imageutils.GetPauseImageName() sentTimes <- timePair{time.Now(), name} - if _, err := client.CoreV1().Pods("default").Update(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods("default").Update(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { panic(fmt.Sprintf("Couldn't make %v: %v", name, err)) } }(i) @@ -754,7 +754,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s }, }, } - pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &podBody) + pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &podBody, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed creating selflinktest pod: %v", err) } diff --git a/test/integration/client/dynamic_client_test.go b/test/integration/client/dynamic_client_test.go index 27076038873..9eb1047ef9b 100644 --- a/test/integration/client/dynamic_client_test.go +++ b/test/integration/client/dynamic_client_test.go @@ -65,7 +65,7 @@ func TestDynamicClient(t *testing.T) { }, } - actual, err := client.CoreV1().Pods("default").Create(context.TODO(), pod) + actual, err := client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error when creating pod: %v", err) } @@ -150,7 +150,7 @@ func TestDynamicClientWatch(t *testing.T) { rv1 := "" for i := 0; i < 10; i++ { event := mkEvent(i) - got, err := client.CoreV1().Events("default").Create(context.TODO(), 
event) + got, err := client.CoreV1().Events("default").Create(context.TODO(), event, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed creating event %#q: %v", event, err) } diff --git a/test/integration/configmap/configmap_test.go b/test/integration/configmap/configmap_test.go index e0287112425..a52ded4027a 100644 --- a/test/integration/configmap/configmap_test.go +++ b/test/integration/configmap/configmap_test.go @@ -57,7 +57,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) }, } - if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(context.TODO(), &cfg); err != nil { + if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(context.TODO(), &cfg, metav1.CreateOptions{}); err != nil { t.Errorf("unable to create test configMap: %v", err) } defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name) @@ -112,7 +112,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) } pod.ObjectMeta.Name = "uses-configmap" - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) diff --git a/test/integration/cronjob/cronjob_test.go b/test/integration/cronjob/cronjob_test.go index 9bc52315707..8c47ba1ac34 100644 --- a/test/integration/cronjob/cronjob_test.go +++ b/test/integration/cronjob/cronjob_test.go @@ -163,7 +163,7 @@ func TestCronJobLaunchesPodAndCleansUp(t *testing.T) { go cjc.Run(stopCh) go jc.Run(1, stopCh) - _, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?")) + _, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create CronJob: %v", err) } diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index b81a123d6eb..19899d7b1b8 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -159,7 +159,7 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) // force update to avoid version conflict ds.ResourceVersion = "" - if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(context.TODO(), ds); err != nil { + if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(context.TODO(), ds, metav1.UpdateOptions{}); err != nil { t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err) return } @@ -248,7 +248,7 @@ func newNode(name string, label map[string]string) *v1.Node { func addNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) { for i := startIndex; i < startIndex+numNodes; i++ { - _, err := nodeClient.Create(context.TODO(), newNode(fmt.Sprintf("node-%d", i), label)) + _, err := nodeClient.Create(context.TODO(), newNode(fmt.Sprintf("node-%d", i), label), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -288,7 +288,7 @@ func validateDaemonSetPodsAndMarkReady( Phase: v1.PodRunning, Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}, } - _, err := podClient.UpdateStatus(context.TODO(), podCopy) + _, err := podClient.UpdateStatus(context.TODO(), podCopy, metav1.UpdateOptions{}) if err != nil { return false, err } @@ -409,7 +409,7 @@ func updateDS(t *testing.T, 
dsClient appstyped.DaemonSetInterface, dsName string return err } updateFunc(newDS) - ds, err = dsClient.Update(context.TODO(), newDS) + ds, err = dsClient.Update(context.TODO(), newDS, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update DaemonSet: %v", err) @@ -447,13 +447,13 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds) + _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } defer cleanupDaemonSets(t, clientset, ds) - _, err = nodeClient.Create(context.TODO(), newNode("single-node", nil)) + _, err = nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -486,7 +486,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds) + _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -550,7 +550,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) { }, } - _, err := dsClient.Create(context.TODO(), ds) + _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -591,7 +591,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds) + _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -602,7 +602,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { node.Status.Conditions = []v1.NodeCondition{ {Type: v1.NodeReady, Status: v1.ConditionFalse}, } - _, err = nodeClient.Create(context.TODO(), node) + _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -639,7 +639,7 @@ func TestInsufficientCapacityNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m") ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds) + ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -648,7 +648,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node := newNode("node-with-limited-memory", nil) node.Status.Allocatable = allocatableResources("100M", "200m") - _, err = nodeClient.Create(context.TODO(), node) + _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -667,7 +667,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node1 := newNode("node-with-enough-memory", nil) node1.Status.Allocatable = allocatableResources("200M", "2000m") - _, err = nodeClient.Create(context.TODO(), node1) + _, err = nodeClient.Create(context.TODO(), node1, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -700,7 +700,7 @@ func TestLaunchWithHashCollision(t *testing.T) { setupScheduler(ctx, t, clientset, informers) // Create single node - _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil)) + _, err := 
nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -714,7 +714,7 @@ func TestLaunchWithHashCollision(t *testing.T) { MaxUnavailable: &oneIntString, }, } - ds, err := dsClient.Create(context.TODO(), orgDs) + ds, err := dsClient.Create(context.TODO(), orgDs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -758,7 +758,7 @@ func TestLaunchWithHashCollision(t *testing.T) { Data: revision.Data, Revision: revision.Revision + 1, } - _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision) + _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create ControllerRevision: %v", err) } @@ -811,7 +811,7 @@ func TestTaintedNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds) + ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -820,13 +820,13 @@ func TestTaintedNode(t *testing.T) { nodeWithTaint := newNode("node-with-taint", nil) nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}} - _, err = nodeClient.Create(context.TODO(), nodeWithTaint) + _, err = nodeClient.Create(context.TODO(), nodeWithTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithTaint: %v", err) } nodeWithoutTaint := newNode("node-without-taint", nil) - _, err = nodeClient.Create(context.TODO(), nodeWithoutTaint) + _, err = nodeClient.Create(context.TODO(), nodeWithoutTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithoutTaint: %v", err) } @@ -841,7 +841,7 @@ func TestTaintedNode(t *testing.T) { } nodeWithTaintCopy := nodeWithTaint.DeepCopy() nodeWithTaintCopy.Spec.Taints = []v1.Taint{} - _, err = nodeClient.Update(context.TODO(), nodeWithTaintCopy) + _, err = nodeClient.Update(context.TODO(), nodeWithTaintCopy, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update nodeWithTaint: %v", err) } @@ -877,7 +877,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec.HostNetwork = true - _, err := dsClient.Create(context.TODO(), ds) + _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -894,7 +894,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) { }, } - _, err = nodeClient.Create(context.TODO(), node) + _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -912,7 +912,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) { }, } - _, err = nodeClient.Create(context.TODO(), nodeNU) + _, err = nodeClient.Create(context.TODO(), nodeNU, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 74ef786b7f2..0fb11eb770a 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -57,7 
+57,7 @@ func TestAdmission(t *testing.T) { }, } - updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod) + updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating pod: %v", err) } diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go index eeeb4ca0bd4..77601450e51 100644 --- a/test/integration/deployment/deployment_test.go +++ b/test/integration/deployment/deployment_test.go @@ -47,7 +47,7 @@ func TestNewDeployment(t *testing.T) { tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -137,7 +137,7 @@ func TestDeploymentRollingUpdate(t *testing.T) { // Create a deployment. var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -218,7 +218,7 @@ func TestDeploymentSelectorImmutability(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err) } @@ -232,7 +232,7 @@ func TestDeploymentSelectorImmutability(t *testing.T) { newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"} deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels deploymentAppsV1.Spec.Template.Labels = newSelectorLabels - _, err = c.AppsV1().Deployments(ns.Name).Update(context.TODO(), deploymentAppsV1) + _, err = c.AppsV1().Deployments(ns.Name).Update(context.TODO(), deploymentAppsV1, metav1.UpdateOptions{}) if err == nil { t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name) } @@ -258,7 +258,7 @@ func TestPausedDeployment(t *testing.T) { tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -359,7 +359,7 @@ func TestScalePausedDeployment(t *testing.T) { tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create 
deployment %s: %v", tester.deployment.Name, err) } @@ -440,7 +440,7 @@ func TestDeploymentHashCollision(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -543,7 +543,7 @@ func TestFailedDeployment(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} tester.deployment.Spec.ProgressDeadlineSeconds = &three var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -601,7 +601,7 @@ func TestOverlappingDeployments(t *testing.T) { var err error var rss []*apps.ReplicaSet for _, tester := range testers { - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) dname := tester.deployment.Name if err != nil { t.Fatalf("failed to create deployment %q: %v", dname, err) @@ -677,7 +677,7 @@ func TestScaledRolloutDeployment(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3) tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", name, err) } @@ -862,7 +862,7 @@ func TestSpecReplicasChange(t *testing.T) { tester.deployment.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType tester.deployment.Spec.Strategy.RollingUpdate = nil var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -920,7 +920,7 @@ func TestDeploymentAvailableCondition(t *testing.T) { // progressDeadlineSeconds must be greater than minReadySeconds tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200) var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1037,7 +1037,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) { replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + 
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1103,7 +1103,7 @@ func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, re return err } scale.Spec.Replicas = replicas - _, err = tester.c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale) + _, err = tester.c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err) @@ -1129,7 +1129,7 @@ func TestDeploymentScaleSubresource(t *testing.T) { replicas := int32(2) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1173,7 +1173,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go index afbd5e40f3d..610e756acc5 100644 --- a/test/integration/deployment/util.go +++ b/test/integration/deployment/util.go @@ -211,7 +211,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error { addPodConditionReady(pod, metav1.Now()) - _, err := c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), pod) + _, err := c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) return err } diff --git a/test/integration/disruption/disruption_test.go b/test/integration/disruption/disruption_test.go index b4ac572ba65..ffcfb789992 100644 --- a/test/integration/disruption/disruption_test.go +++ b/test/integration/disruption/disruption_test.go @@ -157,7 +157,7 @@ func TestPDBWithScaleSubresource(t *testing.T) { }, }, } - if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Create(context.TODO(), pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Create(context.TODO(), pdb, metav1.CreateOptions{}); err != nil { t.Errorf("Error creating PodDisruptionBudget: %v", err) } @@ -198,12 +198,12 @@ func createPod(t *testing.T, name, namespace, labelValue string, clientSet clien }, }, } - _, err := clientSet.CoreV1().Pods(namespace).Create(context.TODO(), pod) + _, err := clientSet.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Error(err) } addPodConditionReady(pod) - if _, err := clientSet.CoreV1().Pods(namespace).UpdateStatus(context.TODO(), pod); err != nil { + if _, err := clientSet.CoreV1().Pods(namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != 
nil { t.Error(err) } } @@ -213,7 +213,7 @@ func createNs(t *testing.T, name string, clientSet clientset.Interface) { ObjectMeta: metav1.ObjectMeta{ Name: name, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Errorf("Error creating namespace: %v", err) } diff --git a/test/integration/dryrun/dryrun_test.go b/test/integration/dryrun/dryrun_test.go index 1d6acd77218..492405ce6a7 100644 --- a/test/integration/dryrun/dryrun_test.go +++ b/test/integration/dryrun/dryrun_test.go @@ -233,7 +233,7 @@ func TestDryRun(t *testing.T) { // create CRDs so we can make sure that custom resources do not get lost etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(s.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...) - if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } diff --git a/test/integration/etcd/crd_overlap_storage_test.go b/test/integration/etcd/crd_overlap_storage_test.go index a1cdc258b78..07397190d88 100644 --- a/test/integration/etcd/crd_overlap_storage_test.go +++ b/test/integration/etcd/crd_overlap_storage_test.go @@ -126,7 +126,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) { }, }, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -170,7 +170,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) { VersionPriority: 100, GroupPriorityMinimum: 100, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -289,7 +289,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) { }, }, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -325,7 +325,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) { } // Updating v1 succeeds (built-in validation, not CR validation) - _, err = crdClient.CustomResourceDefinitions().Patch(context.TODO(), crdCRD.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"updated"}}}`)) + _, err = crdClient.CustomResourceDefinitions().Patch(context.TODO(), crdCRD.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"updated"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/etcd/etcd_cross_group_test.go b/test/integration/etcd/etcd_cross_group_test.go index bd8bdd1211b..94a71b6e4c6 100644 --- a/test/integration/etcd/etcd_cross_group_test.go +++ b/test/integration/etcd/etcd_cross_group_test.go @@ -44,7 +44,7 @@ func TestCrossGroupStorage(t *testing.T) { crossGroupResources := map[schema.GroupVersionKind][]Resource{} - master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}) // Group by persisted GVK for _, resourceToPersist := range master.Resources { diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go index a7e7e16f861..01b64182989 100644 --- a/test/integration/etcd/etcd_storage_path_test.go +++ b/test/integration/etcd/etcd_storage_path_test.go @@ -57,7 +57,7 @@ func TestEtcdStoragePath(t *testing.T) { client := &allClient{dynamicClient: master.Dynamic} - if _, err := 
master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + if _, err := master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil { t.Fatal(err) } diff --git a/test/integration/etcd/server.go b/test/integration/etcd/server.go index 8fbc76b3fb0..3b6ff13af5b 100644 --- a/test/integration/etcd/server.go +++ b/test/integration/etcd/server.go @@ -311,7 +311,7 @@ func CreateTestCRDs(t *testing.T, client apiextensionsclientset.Interface, skipC } func createTestCRD(t *testing.T, client apiextensionsclientset.Interface, skipCrdExistsInDiscovery bool, crd *apiextensionsv1beta1.CustomResourceDefinition) { - if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd); err != nil { + if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create %s CRD; %v", crd.Name, err) } if skipCrdExistsInDiscovery { diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index bcf80694bd0..42290d069d1 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -82,12 +82,12 @@ func TestConcurrentEvictionRequests(t *testing.T) { podName := fmt.Sprintf(podNameFormat, i) pod := newPod(podName) - if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } addPodConditionReady(pod) - if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { t.Fatal(err) } } @@ -95,7 +95,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { waitToObservePods(t, informers.Core().V1().Pods().Informer(), numOfEvictions, v1.PodRunning) pdb := newPDB() - if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create PodDisruptionBudget: %v", err) } @@ -196,19 +196,19 @@ func TestTerminalPodEviction(t *testing.T) { GracePeriodSeconds: &gracePeriodSeconds, } pod := newPod("test-terminal-pod1") - if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } addPodConditionSucceeded(pod) - if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { t.Fatal(err) } waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1, v1.PodSucceeded) pdb := newPDB() - if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create 
PodDisruptionBudget: %v", err) } diff --git a/test/integration/examples/apiserver_test.go b/test/integration/examples/apiserver_test.go index bfa8fbdef2f..3e71da91d1e 100644 --- a/test/integration/examples/apiserver_test.go +++ b/test/integration/examples/apiserver_test.go @@ -124,7 +124,7 @@ func TestAggregatedAPIServer(t *testing.T) { GroupPriorityMinimum: 200, VersionPriority: 200, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/examples/webhook_test.go b/test/integration/examples/webhook_test.go index a22ed046877..e81f7c5e181 100644 --- a/test/integration/examples/webhook_test.go +++ b/test/integration/examples/webhook_test.go @@ -77,7 +77,7 @@ func TestWebhookLoopback(t *testing.T) { }}, FailurePolicy: &fail, }}, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -86,7 +86,7 @@ func TestWebhookLoopback(t *testing.T) { _, err = client.CoreV1().ConfigMaps("default").Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"}, Data: map[string]string{"invalid key": "value"}, - }) + }, metav1.CreateOptions{}) if err == nil { t.Fatal("Unexpected success") } diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index 5013f9f76ab..834b66c2fa5 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -90,7 +90,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { for i := 0; i < numNodes; i++ { var err error for retry := 0; retry < retries; retry++ { - _, err = p.client.CoreV1().Nodes().Create(context.TODO(), baseNode) + _, err = p.client.CoreV1().Nodes().Create(context.TODO(), baseNode, metav1.CreateOptions{}) if err == nil || !testutils.IsRetryableAPIError(err) { break } diff --git a/test/integration/garbagecollector/cluster_scoped_owner_test.go b/test/integration/garbagecollector/cluster_scoped_owner_test.go index 20b8a09c88f..f108ded740f 100644 --- a/test/integration/garbagecollector/cluster_scoped_owner_test.go +++ b/test/integration/garbagecollector/cluster_scoped_owner_test.go @@ -82,7 +82,7 @@ func TestClusterScopedOwners(t *testing.T) { Capacity: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")}, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -91,7 +91,7 @@ func TestClusterScopedOwners(t *testing.T) { Name: "cm-valid", OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: pv.Name, UID: pv.UID}}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -102,7 +102,7 @@ func TestClusterScopedOwners(t *testing.T) { Labels: map[string]string{"missing": "true"}, OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: "missing-name", UID: types.UID("missing-uid")}}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -112,7 +112,7 @@ func TestClusterScopedOwners(t *testing.T) { Name: "cm-invalid", OwnerReferences: []metav1.OwnerReference{{Kind: "UnknownType", APIVersion: "unknown.group/v1", Name: "invalid-name", UID: types.UID("invalid-uid")}}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 735cad84b4d..3fdd25ca027 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ 
b/test/integration/garbagecollector/garbage_collector_test.go @@ -293,14 +293,14 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1.Namespace { ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} - if _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil { + if _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil { t.Fatalf("failed to create namespace: %v", err) } falseVar := false _, err := c.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: "default"}, AutomountServiceAccountToken: &falseVar, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create service account: %v", err) } @@ -329,11 +329,11 @@ func TestCascadingDeletion(t *testing.T) { rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) - toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name)) + toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } - remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name)) + remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -348,7 +348,7 @@ func TestCascadingDeletion(t *testing.T) { // this pod should be cascadingly deleted. pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}}) - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -358,14 +358,14 @@ func TestCascadingDeletion(t *testing.T) { {UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}, {UID: remainingRC.ObjectMeta.UID, Name: remainingRCName}, }) - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } // this pod shouldn't be cascadingly deleted, because it doesn't have an owner. pod = newPod(independentPodName, ns.Name, []metav1.OwnerReference{}) - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -416,7 +416,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) { podClient := clientSet.CoreV1().Pods(ns.Name) pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}}) - _, err := podClient.Create(context.TODO(), pod) + _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -443,7 +443,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet rcName := "test.rc." 
+ nameSuffix rc := newOwnerRC(rcName, namespace) rc.ObjectMeta.Finalizers = initialFinalizers - rc, err := rcClient.Create(context.TODO(), rc) + rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -453,7 +453,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet for j := 0; j < 3; j++ { podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j) pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}}) - createdPod, err := podClient.Create(context.TODO(), pod) + createdPod, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -594,7 +594,7 @@ func TestOrphaning(t *testing.T) { rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name) - toBeDeletedRC, err := rcClient.Create(context.TODO(), toBeDeletedRC) + toBeDeletedRC, err := rcClient.Create(context.TODO(), toBeDeletedRC, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -605,7 +605,7 @@ func TestOrphaning(t *testing.T) { for i := 0; i < podsNum; i++ { podName := garbageCollectedPodName + strconv.Itoa(i) pod := newPod(podName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}}) - createdPod, err := podClient.Create(context.TODO(), pod) + createdPod, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -673,11 +673,11 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) { podClient := clientSet.CoreV1().Pods(ns.Name) rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set - toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name)) + toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } - remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name)) + remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -686,7 +686,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) { {UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name, BlockOwnerDeletion: &trueVar}, {UID: remainingRC.ObjectMeta.UID, Name: remainingRC.Name}, }) - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -733,7 +733,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) { podClient := clientSet.CoreV1().Pods(ns.Name) rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set - toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name)) + toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -750,11 +750,11 @@ func TestNonBlockingOwnerRefDoesNotBlock(t 
*testing.T) { }) // adding finalizer that no controller handles, so that the pod won't be deleted pod2.ObjectMeta.Finalizers = []string{"x/y"} - _, err = podClient.Create(context.TODO(), pod1) + _, err = podClient.Create(context.TODO(), pod1, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } - _, err = podClient.Create(context.TODO(), pod2) + _, err = podClient.Create(context.TODO(), pod2, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -799,7 +799,7 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) { podClient := clientSet.CoreV1().Pods(ns.Name) pod := newPod("lucy", ns.Name, nil) pod.ObjectMeta.Finalizers = []string{"x/y"} - if _, err := podClient.Create(context.TODO(), pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create pod: %v", err) } if err := podClient.Delete(context.TODO(), pod.Name, getForegroundOptions()); err != nil { @@ -839,7 +839,7 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) { // step 3: removes the custom finalizer and checks if the pod was removed patch := []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`) - if _, err := podClient.Patch(context.TODO(), pod.Name, types.JSONPatchType, patch); err != nil { + if _, err := podClient.Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil { t.Fatalf("Failed to update pod: %v", err) } if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { @@ -861,7 +861,7 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) { podClient := clientSet.CoreV1().Pods(ns.Name) rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set - toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name)) + toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } @@ -871,7 +871,7 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) { }) // adding finalizer that no controller handles, so that the pod won't be deleted pod.ObjectMeta.Finalizers = []string{"x/y"} - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -995,14 +995,14 @@ func TestMixedRelationships(t *testing.T) { // Create a core dependent resource. coreDependent := newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("dependent")) link(t, customOwner, coreDependent) - coreDependent, err = configMapClient.Create(context.TODO(), coreDependent) + coreDependent, err = configMapClient.Create(context.TODO(), coreDependent, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create dependent: %v", err) } t.Logf("created core dependent %q", coreDependent.GetName()) // Create a core owner resource. - coreOwner, err := configMapClient.Create(context.TODO(), newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("owner"))) + coreOwner, err := configMapClient.Create(context.TODO(), newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("owner")), metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create owner: %v", err) } @@ -1108,7 +1108,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Create a core dependent resource. 
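
[Editorial note, not part of the patch] The garbage-collector hunks above show the caller-side shape of this change: mutating verbs keep the context added earlier and additionally take an explicit options struct. The sketch below is a minimal illustration of the post-change Create and Patch signatures as these tests use them; the helper name, namespace, and pod value are hypothetical.

// Editorial illustration only -- not part of the patch. Minimal sketch of the
// post-change Create/Patch call shapes relied on by the hunks above.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// createAndUnfinalize creates a pod carrying a test finalizer, then removes the
// finalizer with a JSON patch, loosely mirroring TestDoubleDeletionWithFinalizer.
func createAndUnfinalize(ctx context.Context, client kubernetes.Interface, ns string, pod *v1.Pod) error {
	// Create now takes metav1.CreateOptions; an empty struct keeps the old behavior.
	created, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// Patch gains metav1.PatchOptions before the optional subresource arguments.
	removeFinalizers := []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`)
	_, err = client.CoreV1().Pods(ns).Patch(ctx, created.Name, types.JSONPatchType, removeFinalizers, metav1.PatchOptions{})
	return err
}
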
dependent := newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("dependent")) link(t, owner, dependent) - dependent, err = configMapClient.Create(context.TODO(), dependent) + dependent, err = configMapClient.Create(context.TODO(), dependent, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create dependent: %v", err) } diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index e6818021520..3c64be20118 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -79,7 +79,7 @@ func createNodes(apiURL string, config *Config) error { for i := 0; i < config.NumNodes; i++ { var err error for j := 0; j < maxCreateRetries; j++ { - if _, err = clientSet.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate); err != nil && apierrors.IsServerTimeout(err) { + if _, err = clientSet.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate, metav1.CreateOptions{}); err != nil && apierrors.IsServerTimeout(err) { klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) time.Sleep(retryDelay) continue diff --git a/test/integration/kubelet/watch_manager_test.go b/test/integration/kubelet/watch_manager_test.go index 95befe0f683..8df3369403f 100644 --- a/test/integration/kubelet/watch_manager_test.go +++ b/test/integration/kubelet/watch_manager_test.go @@ -46,7 +46,7 @@ func TestWatchBasedManager(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := client.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := client.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}), metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestWatchBasedManager(t *testing.T) { defer wg.Done() for j := 0; j < 100; j++ { name := fmt.Sprintf("s%d", i*100+j) - if _, err := client.CoreV1().Secrets(testNamespace).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}}); err != nil { + if _, err := client.CoreV1().Secrets(testNamespace).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}); err != nil { select { case errCh <- err: default: diff --git a/test/integration/master/audit_dynamic_test.go b/test/integration/master/audit_dynamic_test.go index ba0ea807153..2f3410fb142 100644 --- a/test/integration/master/audit_dynamic_test.go +++ b/test/integration/master/audit_dynamic_test.go @@ -71,7 +71,7 @@ func TestDynamicAudit(t *testing.T) { // test creates a single audit sink, generates audit events, and ensures they arrive at the server success := t.Run("one sink", func(t *testing.T) { - _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig1) + _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig1, metav1.CreateOptions{}) require.NoError(t, err, "failed to create audit sink1") t.Log("created audit sink1") @@ -89,7 +89,7 @@ func TestDynamicAudit(t *testing.T) { // test creates a second audit sink, generates audit events, and ensures events arrive in both servers success = t.Run("two sink", func(t *testing.T) { - _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig2) + _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig2, metav1.CreateOptions{}) require.NoError(t, err, "failed to create audit sink2") 
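
[Editorial note, not part of the patch] The perf helpers touched above (framework/perf_utils.go, ipamperf/util.go) retry node creation on transient API errors; the only change in this patch is the extra options argument. A hedged sketch of that retry-and-create shape follows; the retry count, sleep, and helper name are illustrative.

// Editorial illustration only -- not part of the patch. Roughly the retry loop
// used by the perf helpers above, with metav1.CreateOptions threaded through.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func createNodeWithRetry(ctx context.Context, client kubernetes.Interface, node *v1.Node) error {
	const maxRetries = 3
	var err error
	for i := 0; i < maxRetries; i++ {
		// Only the final argument is new; an empty CreateOptions preserves old semantics.
		if _, err = client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err == nil || !apierrors.IsServerTimeout(err) {
			return err
		}
		time.Sleep(time.Second) // back off before retrying a server timeout
	}
	return err
}
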
t.Log("created audit sink2") @@ -169,7 +169,7 @@ func TestDynamicAudit(t *testing.T) { // update the url sink1.Spec.Webhook.ClientConfig.URL = &testServer2.Server.URL - _, err = kubeclient.AuditregistrationV1alpha1().AuditSinks().Update(context.TODO(), sink1) + _, err = kubeclient.AuditregistrationV1alpha1().AuditSinks().Update(context.TODO(), sink1, metav1.UpdateOptions{}) require.NoError(t, err, "failed to update audit sink1") t.Log("updated audit sink1 to point to server2") diff --git a/test/integration/master/audit_test.go b/test/integration/master/audit_test.go index 21532e4d24a..2b6769f7521 100644 --- a/test/integration/master/audit_test.go +++ b/test/integration/master/audit_test.go @@ -361,7 +361,7 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) { }, } - _, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap) + _, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) expectNoError(t, err, "failed to create audit-configmap") _, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{}) @@ -375,10 +375,10 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) { // event at stage ResponseComplete will not be generated. } - _, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) expectNoError(t, err, "failed to update audit-configmap") - _, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) expectNoError(t, err, "failed to patch configmap") _, err = kubeclient.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -443,6 +443,6 @@ func createV1beta1MutationWebhook(client clientset.Interface, endpoint string) e FailurePolicy: &fail, AdmissionReviewVersions: []string{"v1beta1"}, }}, - }) + }, metav1.CreateOptions{}) return err } diff --git a/test/integration/master/crd_test.go b/test/integration/master/crd_test.go index ead8badaef4..d14c4e935ec 100644 --- a/test/integration/master/crd_test.go +++ b/test/integration/master/crd_test.go @@ -50,7 +50,7 @@ func TestCRDShadowGroup(t *testing.T) { if err != nil { t.Fatalf("Unexpected error: %v", err) } - if _, err := kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}), metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -66,7 +66,7 @@ func TestCRDShadowGroup(t *testing.T) { PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Ingress: []networkingv1.NetworkPolicyIngressRule{}, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create NetworkPolicy: %v", err) } @@ -112,7 +112,7 @@ func TestCRD(t *testing.T) { if err != nil { t.Fatalf("Unexpected error: %v", err) } - if _, err := kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: 
testNamespace}}), metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -269,7 +269,7 @@ func TestCRDOpenAPI(t *testing.T) { prop := structuralCRD.Spec.Validation.OpenAPIV3Schema.Properties["foo"] prop.Type = "boolean" structuralCRD.Spec.Validation.OpenAPIV3Schema.Properties["foo"] = prop - if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), structuralCRD); err != nil { + if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), structuralCRD, metav1.UpdateOptions{}); err != nil { t.Fatal(err) } waitForSpec(structuralCRD, "boolean") diff --git a/test/integration/master/kube_apiserver_test.go b/test/integration/master/kube_apiserver_test.go index 14e59a204d2..27f6be71e08 100644 --- a/test/integration/master/kube_apiserver_test.go +++ b/test/integration/master/kube_apiserver_test.go @@ -92,7 +92,7 @@ func TestRun(t *testing.T) { }, }, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create deployment: %v", err) } diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index 0d3d78f82e8..5c415e5a9b3 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -342,7 +342,7 @@ func TestObjectSizeResponses(t *testing.T) { for _, r := range requests { t.Run(r.size, func(t *testing.T) { - _, err := client.AppsV1().Deployments(metav1.NamespaceDefault).Create(context.TODO(), r.deploymentObject) + _, err := client.AppsV1().Deployments(metav1.NamespaceDefault).Create(context.TODO(), r.deploymentObject, metav1.CreateOptions{}) if err != nil { if !strings.Contains(err.Error(), r.expectedMessage) { t.Errorf("got: %s;want: %s", err.Error(), r.expectedMessage) @@ -679,13 +679,13 @@ func TestServiceAlloc(t *testing.T) { // make 5 more services to take up all IPs for i := 0; i < 5; i++ { - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(i)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(i), metav1.CreateOptions{}); err != nil { t.Error(err) } } // Make another service. It will fail because we're out of cluster IPs - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8), metav1.CreateOptions{}); err != nil { if !strings.Contains(err.Error(), "range is full") { t.Errorf("unexpected error text: %v", err) } @@ -707,7 +707,7 @@ func TestServiceAlloc(t *testing.T) { } // This time creating the second service should work. 
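
[Editorial note, not part of the patch] The audit and CRD hunks above follow a read-modify-write flow in which only the write side changes: Get keeps its existing signature in this patch, while Update now takes metav1.UpdateOptions. The sketch below illustrates that flow under assumed names (configmap, namespace, and label key are placeholders).

// Editorial illustration only -- not part of the patch. Read-modify-write with
// the new UpdateOptions argument, mirroring the audit/CRD hunks above.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func relabelConfigMap(ctx context.Context, client kubernetes.Interface, ns, name string) error {
	// Get is unchanged by this patch; only mutating verbs gain an options struct.
	cm, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if cm.Labels == nil {
		cm.Labels = map[string]string{}
	}
	cm.Labels["example"] = "true"
	// An empty UpdateOptions keeps previous behavior; fields such as DryRun or
	// FieldManager can later be set here without another signature change.
	_, err = client.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}
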
- if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8), metav1.CreateOptions{}); err != nil { t.Fatalf("got unexpected error: %v", err) } } @@ -741,7 +741,7 @@ func TestUpdateNodeObjects(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("node-%d", i), }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -850,7 +850,7 @@ func TestUpdateNodeObjects(t *testing.T) { lastCount = 0 n.Status.Conditions = nil } - if _, err := c.Nodes().UpdateStatus(context.TODO(), n); err != nil { + if _, err := c.Nodes().UpdateStatus(context.TODO(), n, metav1.UpdateOptions{}); err != nil { if !apierrors.IsConflict(err) { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break diff --git a/test/integration/master/transformation_testcase.go b/test/integration/master/transformation_testcase.go index 64623c057ac..9e3325307dd 100644 --- a/test/integration/master/transformation_testcase.go +++ b/test/integration/master/transformation_testcase.go @@ -205,7 +205,7 @@ func (e *transformTest) createNamespace(name string) (*corev1.Namespace, error) }, } - if _, err := e.restClient.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil { + if _, err := e.restClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("unable to create testing namespace %v", err) } @@ -222,7 +222,7 @@ func (e *transformTest) createSecret(name, namespace string) (*corev1.Secret, er secretKey: []byte(secretVal), }, } - if _, err := e.restClient.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret); err != nil { + if _, err := e.restClient.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("error while writing secret: %v", err) } diff --git a/test/integration/namespace/ns_conditions_test.go b/test/integration/namespace/ns_conditions_test.go index a1d92deb2c4..2ca3fc8b170 100644 --- a/test/integration/namespace/ns_conditions_test.go +++ b/test/integration/namespace/ns_conditions_test.go @@ -47,7 +47,7 @@ func TestNamespaceCondition(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: nsName, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/objectmeta/objectmeta_test.go b/test/integration/objectmeta/objectmeta_test.go index 0384ce991ac..1140c7850dd 100644 --- a/test/integration/objectmeta/objectmeta_test.go +++ b/test/integration/objectmeta/objectmeta_test.go @@ -42,12 +42,12 @@ func TestIgnoreClusterName(t *testing.T) { ClusterName: "cluster-name-to-ignore", }, } - nsNew, err := client.CoreV1().Namespaces().Create(context.TODO(), &ns) + nsNew, err := client.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) - nsNew, err = client.CoreV1().Namespaces().Update(context.TODO(), &ns) + nsNew, err = client.CoreV1().Namespaces().Update(context.TODO(), &ns, metav1.UpdateOptions{}) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) diff --git a/test/integration/pods/pods_test.go b/test/integration/pods/pods_test.go index 02ed3dbbab7..8b52d002ac2 100644 --- a/test/integration/pods/pods_test.go +++ b/test/integration/pods/pods_test.go @@ -136,13 +136,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) { 
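
[Editorial note, not part of the patch] Status and other subresource writes gain the same options argument, as the UpdateStatus and UpdateScale hunks in this file and the deployment/replicaset tests show. The sketch below marks a pod Ready the way the markPodReady-style helpers above do, wrapped in client-go's conflict-retry helper; the pod name, namespace, and choice of retry.DefaultRetry are assumptions for illustration.

// Editorial illustration only -- not part of the patch. UpdateStatus with the
// new metav1.UpdateOptions, under the usual conflict-retry wrapper.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

func markPodReady(ctx context.Context, client kubernetes.Interface, ns, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := client.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
			Type:   v1.PodReady,
			Status: v1.ConditionTrue,
		})
		// UpdateStatus now takes metav1.UpdateOptions, mirroring Update.
		_, err = client.CoreV1().Pods(ns).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
		return err
	})
}
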
pod.Spec.ActiveDeadlineSeconds = tc.original pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i) - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } pod.Spec.ActiveDeadlineSeconds = tc.update - _, err := client.CoreV1().Pods(ns.Name).Update(context.TODO(), pod) + _, err := client.CoreV1().Pods(ns.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) if tc.valid && err != nil { t.Errorf("%v: failed to update pod: %v", tc.name, err) } else if !tc.valid && err == nil { @@ -180,7 +180,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { }, } - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } @@ -224,7 +224,7 @@ func TestPodCreateEphemeralContainers(t *testing.T) { }, } - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err == nil { t.Errorf("Unexpected allowed creation of pod with ephemeral containers") integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) } else if !strings.HasSuffix(err.Error(), "spec.ephemeralContainers: Forbidden: cannot be set on create") { @@ -235,7 +235,7 @@ func TestPodCreateEphemeralContainers(t *testing.T) { // setUpEphemeralContainers creates a pod that has Ephemeral Containers. This is a two step // process because Ephemeral Containers are not allowed during pod creation. func setUpEphemeralContainers(podsClient typedv1.PodInterface, pod *v1.Pod, containers []v1.EphemeralContainer) error { - if _, err := podsClient.Create(context.TODO(), pod); err != nil { + if _, err := podsClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { return fmt.Errorf("failed to create pod: %v", err) } @@ -244,7 +244,7 @@ func setUpEphemeralContainers(podsClient typedv1.PodInterface, pod *v1.Pod, cont } pod.Spec.EphemeralContainers = containers - if _, err := podsClient.Update(context.TODO(), pod); err == nil { + if _, err := podsClient.Update(context.TODO(), pod, metav1.UpdateOptions{}); err == nil { return fmt.Errorf("unexpected allowed direct update of ephemeral containers during set up: %v", err) } @@ -254,7 +254,7 @@ func setUpEphemeralContainers(podsClient typedv1.PodInterface, pod *v1.Pod, cont } ec.EphemeralContainers = containers - if _, err = podsClient.UpdateEphemeralContainers(context.TODO(), pod.Name, ec); err != nil { + if _, err = podsClient.UpdateEphemeralContainers(context.TODO(), pod.Name, ec, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed to update ephemeral containers for test case set up: %v", err) } @@ -457,7 +457,7 @@ func TestPodPatchEphemeralContainers(t *testing.T) { t.Errorf("%v: %v", tc.name, err) } - if _, err := client.CoreV1().Pods(ns.Name).Patch(context.TODO(), pod.Name, tc.patchType, tc.patchBody, "ephemeralcontainers"); tc.valid && err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Patch(context.TODO(), pod.Name, tc.patchType, tc.patchBody, metav1.PatchOptions{}, "ephemeralcontainers"); tc.valid && err != nil { t.Errorf("%v: failed to update ephemeral containers: %v", tc.name, err) } else if !tc.valid && err == nil { t.Errorf("%v: unexpected allowed update to ephemeral 
containers", tc.name) @@ -653,7 +653,7 @@ func TestPodUpdateEphemeralContainers(t *testing.T) { } ec.EphemeralContainers = tc.update - if _, err := client.CoreV1().Pods(ns.Name).UpdateEphemeralContainers(context.TODO(), pod.Name, ec); tc.valid && err != nil { + if _, err := client.CoreV1().Pods(ns.Name).UpdateEphemeralContainers(context.TODO(), pod.Name, ec, metav1.UpdateOptions{}); tc.valid && err != nil { t.Errorf("%v: failed to update ephemeral containers: %v", tc.name, err) } else if !tc.valid && err == nil { t.Errorf("%v: unexpected allowed update to ephemeral containers", tc.name) diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index af29287a119..05a8bf1f2a1 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -156,7 +156,7 @@ func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Cl t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(context.TODO(), quota); err != nil { + if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(context.TODO(), quota, metav1.CreateOptions{}); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -215,7 +215,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc); err != nil { + if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc, metav1.CreateOptions{}); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -339,7 +339,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { }, }, } - if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err == nil { t.Fatalf("expected error for insufficient quota") } @@ -362,7 +362,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // attempt to create a new pod once the quota is propagated err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { // retry until we succeed (to allow time for all changes to propagate) - if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err == nil { return true, nil } return false, nil diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 05a631de274..89214688d43 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -174,14 +174,14 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*apps.Repl var createdRSs []*apps.ReplicaSet var createdPods []*v1.Pod for _, rs := range rss { - createdRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Create(context.TODO(), rs) + createdRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Create(context.TODO(), rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replica set %s: %v", rs.Name, err) } createdRSs = append(createdRSs, createdRS) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod 
%s: %v", pod.Name, err) } @@ -215,7 +215,7 @@ func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd return err } updateFunc(newPod) - pod, err = podClient.Update(context.TODO(), newPod) + pod, err = podClient.Update(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update pod %s: %v", podName, err) @@ -230,7 +230,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, return err } updateStatusFunc(newPod) - _, err = podClient.UpdateStatus(context.TODO(), newPod) + _, err = podClient.UpdateStatus(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) @@ -255,7 +255,7 @@ func updateRS(t *testing.T, rsClient appsclient.ReplicaSetInterface, rsName stri return err } updateFunc(newRS) - rs, err = rsClient.Update(context.TODO(), newRS) + rs, err = rsClient.Update(context.TODO(), newRS, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update rs %s: %v", rsName, err) @@ -319,7 +319,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 } pod.Status.Conditions = append(pod.Status.Conditions, *condition) } - _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) + _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil { // When status fails to be updated, we continue to next pod continue @@ -354,7 +354,7 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *a return err } scale.Spec.Replicas = replicas - _, err = c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rs.Name, scale) + _, err = c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rs.Name, scale, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rs %s: %v", rs.Name, err) @@ -429,14 +429,14 @@ func TestAdoption(t *testing.T) { rsClient := clientSet.AppsV1().ReplicaSets(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) const rsName = "rs" - rs, err := rsClient.Create(context.TODO(), newRS(rsName, ns.Name, 1)) + rs, err := rsClient.Create(context.TODO(), newRS(rsName, ns.Name, 1), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replica set: %v", err) } podName := fmt.Sprintf("pod%d", i) pod := newMatchingPod(podName, ns.Name) pod.OwnerReferences = tc.existingOwnerReferences(rs) - _, err = podClient.Create(context.TODO(), pod) + _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -480,7 +480,7 @@ func TestRSSelectorImmutability(t *testing.T) { newSelectorLabels := map[string]string{"changed_name_apps_v1": "changed_test_apps_v1"} rsV1.Spec.Selector.MatchLabels = newSelectorLabels rsV1.Spec.Template.Labels = newSelectorLabels - _, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(context.TODO(), rsV1) + _, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(context.TODO(), rsV1, metav1.UpdateOptions{}) if err == nil { t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 replicaset %s", rsV1.Name) } diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index f28e6244110..93ef5787a74 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ 
b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -156,14 +156,14 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic var createdRCs []*v1.ReplicationController var createdPods []*v1.Pod for _, rc := range rcs { - createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(context.TODO(), rc) + createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err) } createdRCs = append(createdRCs, createdRC) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -204,7 +204,7 @@ func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd return err } updateFunc(newPod) - pod, err = podClient.Update(context.TODO(), newPod) + pod, err = podClient.Update(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update pod %s: %v", podName, err) @@ -219,7 +219,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, return err } updateStatusFunc(newPod) - _, err = podClient.UpdateStatus(context.TODO(), newPod) + _, err = podClient.UpdateStatus(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) @@ -244,7 +244,7 @@ func updateRC(t *testing.T, rcClient typedv1.ReplicationControllerInterface, rcN return err } updateFunc(newRC) - rc, err = rcClient.Update(context.TODO(), newRC) + rc, err = rcClient.Update(context.TODO(), newRC, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to update rc %s: %v", rcName, err) @@ -308,7 +308,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 } pod.Status.Conditions = append(pod.Status.Conditions, *condition) } - _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) + _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) if err != nil { // When status fails to be updated, we continue to next pod continue @@ -343,7 +343,7 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v return err } scale.Spec.Replicas = replicas - _, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(context.TODO(), rc.Name, scale) + _, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(context.TODO(), rc.Name, scale, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rc %s: %v", rc.Name, err) @@ -418,14 +418,14 @@ func TestAdoption(t *testing.T) { rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) const rcName = "rc" - rc, err := rcClient.Create(context.TODO(), newRC(rcName, ns.Name, 1)) + rc, err := rcClient.Create(context.TODO(), newRC(rcName, ns.Name, 1), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create replication controllers: %v", err) } podName := fmt.Sprintf("pod%d", i) pod := newMatchingPod(podName, ns.Name) pod.OwnerReferences = tc.existingOwnerReferences(rc) - _, err = podClient.Create(context.TODO(), pod) + _, err = 
podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } diff --git a/test/integration/scale/scale_test.go b/test/integration/scale/scale_test.go index 362c5be4001..03722cc273d 100644 --- a/test/integration/scale/scale_test.go +++ b/test/integration/scale/scale_test.go @@ -121,16 +121,16 @@ func TestScaleSubresources(t *testing.T) { } // Create objects required to exercise scale subresources - if _, err := clientSet.CoreV1().ReplicationControllers("default").Create(context.TODO(), rcStub); err != nil { + if _, err := clientSet.CoreV1().ReplicationControllers("default").Create(context.TODO(), rcStub, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().ReplicaSets("default").Create(context.TODO(), rsStub); err != nil { + if _, err := clientSet.AppsV1().ReplicaSets("default").Create(context.TODO(), rsStub, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().Deployments("default").Create(context.TODO(), deploymentStub); err != nil { + if _, err := clientSet.AppsV1().Deployments("default").Create(context.TODO(), deploymentStub, metav1.CreateOptions{}); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().StatefulSets("default").Create(context.TODO(), ssStub); err != nil { + if _, err := clientSet.AppsV1().StatefulSets("default").Create(context.TODO(), ssStub, metav1.CreateOptions{}); err != nil { t.Fatal(err) } diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 67e56c2533c..e09382039d9 100644 --- a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -378,7 +378,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) for ii := 0; ii < 5; ii++ { node.Name = fmt.Sprintf("machine%d", ii+1) - if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create nodes: %v", err) } } @@ -400,7 +400,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) }, } - myPod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), pod) + myPod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod: %v", err) } diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go index f6d647bd5aa..b99b01e7777 100644 --- a/test/integration/scheduler/predicates_test.go +++ b/test/integration/scheduler/predicates_test.go @@ -824,7 +824,7 @@ func TestInterPodAffinity(t *testing.T) { } else { nsName = testCtx.ns.Name } - createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod) + createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) } @@ -833,7 +833,7 @@ func TestInterPodAffinity(t *testing.T) { t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test) } } - testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod) + testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{}) if err != nil { if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Test Failed: error, %v, while creating pod during 
test: %v", err, test.test) @@ -1005,7 +1005,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { allPods := append(tt.existingPods, tt.incomingPod) defer cleanupPods(cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -1014,7 +1014,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { t.Errorf("Test Failed: error while waiting for pod during test: %v", err) } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{}) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 6b266c3237b..9a35c9a8123 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -1172,7 +1172,7 @@ func TestPDBInPreemption(t *testing.T) { } // Add pod condition ready so that PDB is updated. addPodConditionReady(p) - if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(context.TODO(), p); err != nil { + if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(context.TODO(), p, metav1.UpdateOptions{}); err != nil { t.Fatal(err) } } @@ -1183,7 +1183,7 @@ func TestPDBInPreemption(t *testing.T) { // Create PDBs. for _, pdb := range test.pdbs { - _, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(context.TODO(), pdb) + _, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PDB: %v", err) } diff --git a/test/integration/scheduler/priorities_test.go b/test/integration/scheduler/priorities_test.go index a09710695f5..767310e45ae 100644 --- a/test/integration/scheduler/priorities_test.go +++ b/test/integration/scheduler/priorities_test.go @@ -334,7 +334,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { allPods := append(tt.existingPods, tt.incomingPod) defer cleanupPods(cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -343,7 +343,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { t.Errorf("Test Failed: error while waiting for pod during test: %v", err) } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{}) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 33a5982b716..58bb24cb018 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -251,7 +251,7 @@ priorities: [] } 
policyConfigMap.APIVersion = "v1" - clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap) + clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap, metav1.CreateOptions{}) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientSet.EventsV1beta1().Events("")}) stopCh := make(chan struct{}) @@ -375,7 +375,7 @@ func TestUnschedulableNodes(t *testing.T) { { makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) { n.Spec.Unschedulable = true - if _, err := c.CoreV1().Nodes().Update(context.TODO(), n); err != nil { + if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update node with unschedulable=true: %v", err) } err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool { @@ -391,7 +391,7 @@ func TestUnschedulableNodes(t *testing.T) { }, makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) { n.Spec.Unschedulable = false - if _, err := c.CoreV1().Nodes().Update(context.TODO(), n); err != nil { + if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update node with unschedulable=false: %v", err) } err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool { @@ -405,7 +405,7 @@ func TestUnschedulableNodes(t *testing.T) { } for i, mod := range nodeModifications { - unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node) + unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -488,7 +488,7 @@ func TestMultiScheduler(t *testing.T) { }, }, } - testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node) + testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) // 3. 
create 3 pods for testing t.Logf("create 3 pods for testing") @@ -639,7 +639,7 @@ func TestAllocatable(t *testing.T) { }, } - if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), allocNode); err != nil { + if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), allocNode, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update node with Status.Allocatable: %v", err) } diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go index 879cc7eb923..0e3d08b96bd 100644 --- a/test/integration/scheduler/taint_test.go +++ b/test/integration/scheduler/taint_test.go @@ -531,7 +531,7 @@ func TestTaintNodeByCondition(t *testing.T) { }, } - if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create node, err: %v", err) } if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil { @@ -549,7 +549,7 @@ func TestTaintNodeByCondition(t *testing.T) { pod.Name = fmt.Sprintf("%s-%d", pod.Name, i) pod.Spec.Tolerations = p.tolerations - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod %s/%s, error: %v", pod.Namespace, pod.Name, err) @@ -669,7 +669,7 @@ func TestTaintBasedEvictions(t *testing.T) { defer cleanupTest(t, testCtx) cs := testCtx.clientSet informers := testCtx.informerFactory - _, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.ns) + _, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.ns, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create namespace %+v", err) } @@ -731,7 +731,7 @@ func TestTaintBasedEvictions(t *testing.T) { }, }, }) - if _, err := cs.CoreV1().Nodes().Create(context.TODO(), nodes[i]); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), nodes[i], metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create node, err: %v", err) } } @@ -743,7 +743,7 @@ func TestTaintBasedEvictions(t *testing.T) { test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i] } - test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod) + test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error: %v, while creating pod", err) } diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 6460e4e0860..c9b8b9608c1 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -83,7 +83,7 @@ func createAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clie Data: map[string]string{schedulerapi.SchedulerPolicyConfigMapKey: policyString}, } policyConfigMap.APIVersion = "v1" - clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap) + clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap, metav1.CreateOptions{}) return schedulerapi.SchedulerAlgorithmSource{ Policy: &schedulerapi.SchedulerPolicySource{ @@ -344,17 +344,17 @@ func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1 // createNode creates a node with the given resource list. 
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) { - return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, nil)) + return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, nil), metav1.CreateOptions{}) } // createNodeWithImages creates a node with the given resource list and images. func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) { - return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images)) + return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images), metav1.CreateOptions{}) } // updateNodeStatus updates the status of node. func updateNodeStatus(cs clientset.Interface, node *v1.Node) error { - _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node) + _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}) return err } @@ -404,7 +404,7 @@ func addTaintToNode(cs clientset.Interface, nodeName string, taint v1.Taint) err } copy := node.DeepCopy() copy.Spec.Taints = append(copy.Spec.Taints, taint) - _, err = cs.CoreV1().Nodes().Update(context.TODO(), copy) + _, err = cs.CoreV1().Nodes().Update(context.TODO(), copy, metav1.UpdateOptions{}) return err } @@ -470,7 +470,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod { // createPausePod creates a pod with "Pause" image and the given config and // return its pointer and error status. func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) { - return cs.CoreV1().Pods(p.Namespace).Create(context.TODO(), p) + return cs.CoreV1().Pods(p.Namespace).Create(context.TODO(), p, metav1.CreateOptions{}) } // createPausePodWithResource creates a pod with "Pause" image and the given @@ -499,7 +499,7 @@ func createPausePodWithResource(cs clientset.Interface, podName string, // runPausePod creates a pod with "Pause" image and the given config and waits // until it is scheduled. It returns its pointer and error status. func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("Error creating pause pod: %v", err) } @@ -536,7 +536,7 @@ func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig // runPodWithContainers creates a pod with given config and containers and waits // until it is scheduled. It returns its pointer and error status. func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("Error creating pod-with-containers: %v", err) } diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index 9d9f44f59ab..d51edba9cf8 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -243,11 +243,11 @@ func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) { // generateNodes generates nodes to be used for scheduling. 
func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) { for i := 0; i < inputConfig.NodeCount; i++ { - config.clientset.CoreV1().Nodes().Create(context.TODO(), config.mutatedNodeTemplate) + config.clientset.CoreV1().Nodes().Create(context.TODO(), config.mutatedNodeTemplate, metav1.CreateOptions{}) } for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ { - config.clientset.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate) + config.clientset.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate, metav1.CreateOptions{}) } } diff --git a/test/integration/secrets/secrets_test.go b/test/integration/secrets/secrets_test.go index 0a87df95ecd..f626d9d4fd5 100644 --- a/test/integration/secrets/secrets_test.go +++ b/test/integration/secrets/secrets_test.go @@ -63,7 +63,7 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) { }, } - if _, err := client.CoreV1().Secrets(s.Namespace).Create(context.TODO(), &s); err != nil { + if _, err := client.CoreV1().Secrets(s.Namespace).Create(context.TODO(), &s, metav1.CreateOptions{}); err != nil { t.Errorf("unable to create test secret: %v", err) } defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name) @@ -103,14 +103,14 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) { // Create a pod to consume secret. pod.ObjectMeta.Name = "uses-secret" - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) // Create a pod that consumes non-existent secret. pod.ObjectMeta.Name = "uses-non-existent-secret" - if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index 3818e87a7f3..ba7ee1f9408 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -71,7 +71,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -109,13 +109,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { name := "my-service-account" // Create namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("could not create namespace: %v", err) } // Create service account - _, err = c.CoreV1().ServiceAccounts(ns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err = c.CoreV1().ServiceAccounts(ns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: 
metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -150,7 +150,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { t.Fatal(err) } serviceAccount.Secrets = []v1.ObjectReference{} - _, err = c.CoreV1().ServiceAccounts(ns).Update(context.TODO(), serviceAccount) + _, err = c.CoreV1().ServiceAccounts(ns).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) if err != nil { t.Fatal(err) } @@ -206,7 +206,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -260,7 +260,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts - createdPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), &protoPod) + createdPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), &protoPod, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -289,19 +289,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -334,7 +334,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -580,7 +580,10 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string }, } writeOps := []testOperation{ - func() error { _, err := c.CoreV1().Secrets(ns).Create(context.TODO(), testSecret); return err }, + func() error { + _, err := c.CoreV1().Secrets(ns).Create(context.TODO(), testSecret, metav1.CreateOptions{}) + return err + }, func() 
error { return c.CoreV1().Secrets(ns).Delete(context.TODO(), testSecret.Name, nil) }, } diff --git a/test/integration/statefulset/util.go b/test/integration/statefulset/util.go index 0dc964ff3f6..10528b7d562 100644 --- a/test/integration/statefulset/util.go +++ b/test/integration/statefulset/util.go @@ -190,7 +190,7 @@ func runControllerAndInformers(sc *statefulset.StatefulSetController, informers } func createHeadlessService(t *testing.T, clientSet clientset.Interface, headlessService *v1.Service) { - _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(context.TODO(), headlessService) + _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(context.TODO(), headlessService, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed creating headless service: %v", err) } @@ -200,14 +200,14 @@ func createSTSsPods(t *testing.T, clientSet clientset.Interface, stss []*appsv1. var createdSTSs []*appsv1.StatefulSet var createdPods []*v1.Pod for _, sts := range stss { - createdSTS, err := clientSet.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts) + createdSTS, err := clientSet.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create sts %s: %v", sts.Name, err) } createdSTSs = append(createdSTSs, createdSTS) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to create pod %s: %v", pod.Name, err) } @@ -240,7 +240,7 @@ func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd return err } updateFunc(newPod) - pod, err = podClient.Update(context.TODO(), newPod) + pod, err = podClient.Update(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("failed to update pod %s: %v", podName, err) @@ -256,7 +256,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, podName strin return err } updateStatusFunc(newPod) - pod, err = podClient.UpdateStatus(context.TODO(), newPod) + pod, err = podClient.UpdateStatus(context.TODO(), newPod, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("failed to update status of pod %s: %v", podName, err) @@ -285,7 +285,7 @@ func updateSTS(t *testing.T, stsClient typedappsv1.StatefulSetInterface, stsName return err } updateFunc(newSTS) - sts, err = stsClient.Update(context.TODO(), newSTS) + sts, err = stsClient.Update(context.TODO(), newSTS, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("failed to update sts %s: %v", stsName, err) @@ -302,7 +302,7 @@ func scaleSTS(t *testing.T, c clientset.Interface, sts *appsv1.StatefulSet, repl return err } *newSTS.Spec.Replicas = replicas - sts, err = stsClient.Update(context.TODO(), newSTS) + sts, err = stsClient.Update(context.TODO(), newSTS, metav1.UpdateOptions{}) return err }); err != nil { t.Fatalf("failed to update .Spec.Replicas to %d for sts %s: %v", replicas, sts.Name, err) diff --git a/test/integration/storageclasses/storage_classes_test.go b/test/integration/storageclasses/storage_classes_test.go index 988cd69df7d..ae61dd8e8f3 100644 --- a/test/integration/storageclasses/storage_classes_test.go +++ b/test/integration/storageclasses/storage_classes_test.go @@ -60,7 +60,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names Provisioner: provisionerPluginName, } - if _, err := 
client.StorageV1().StorageClasses().Create(context.TODO(), &s); err != nil { + if _, err := client.StorageV1().StorageClasses().Create(context.TODO(), &s, metav1.CreateOptions{}); err != nil { t.Errorf("unable to create test storage class: %v", err) } defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name) @@ -80,7 +80,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names } pvc.ObjectMeta.Name = "uses-storageclass" - if _, err := client.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc); err != nil { + if _, err := client.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pvc: %v", err) } defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name) diff --git a/test/integration/ttlcontroller/ttlcontroller_test.go b/test/integration/ttlcontroller/ttlcontroller_test.go index e35e6917b0c..e6cddac6164 100644 --- a/test/integration/ttlcontroller/ttlcontroller_test.go +++ b/test/integration/ttlcontroller/ttlcontroller_test.go @@ -60,7 +60,7 @@ func createNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex Name: fmt.Sprintf("node-%d", idx), }, } - if _, err := client.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create node: %v", err) } }(i) diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index 710f36d5689..3263a884d26 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -163,14 +163,14 @@ func TestPodDeletionWithDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) } stopCh := make(chan struct{}) go informers.Core().V1().Nodes().Informer().Run(stopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -241,13 +241,13 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -277,7 +277,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -310,13 +310,13 @@ func 
TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -346,7 +346,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -489,13 +489,13 @@ func TestPodAddedByDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -565,7 +565,7 @@ func TestPVCBoundWithADC(t *testing.T) { }, }, } - if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) } @@ -573,10 +573,10 @@ func TestPVCBoundWithADC(t *testing.T) { pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < 3; i++ { pod, pvc := fakePodWithPVC(fmt.Sprintf("fakepod-pvcnotbound-%d", i), fmt.Sprintf("fakepvc-%d", i), namespaceName) - if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod); err != nil { + if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } - if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc); err != nil { + if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pvc : %v", err) } pvcs = append(pvcs, pvc) @@ -584,7 +584,7 @@ func TestPVCBoundWithADC(t *testing.T) { // pod with no pvc podNew := fakePodWithVol(namespaceName) podNew.SetName("fakepod") - if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(context.TODO(), podNew); err != nil { + if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(context.TODO(), podNew, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -625,7 +625,7 @@ func createPVForPVC(t *testing.T, testClient *clientset.Clientset, pvc *v1.Persi StorageClassName: *pvc.Spec.StorageClassName, }, } - if _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { 
+ if _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create pv : %v", err) } } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 9614ff2ec23..c37f1712820 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -128,13 +128,13 @@ func TestPersistentVolumeRecycler(t *testing.T) { pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle) pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } klog.V(2).Infof("TestPersistentVolumeRecycler pvc created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -183,12 +183,12 @@ func TestPersistentVolumeDeleter(t *testing.T) { pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete) pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } klog.V(2).Infof("TestPersistentVolumeDeleter pv created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -248,7 +248,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { counter++ newPvc := pvc.DeepCopy() newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)} - claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), newPvc) + claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), newPvc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating newPvc: %v", err) } @@ -265,7 +265,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { pv.Spec.ClaimRef = claimRef pv.Spec.ClaimRef.UID = "" - pv, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + pv, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { t.Fatalf("Unexpected error creating pv: %v", err) } @@ -322,11 +322,11 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "true"}) pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "false"}) - _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create 
PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } @@ -338,7 +338,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { }, } - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) } @@ -403,11 +403,11 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""}) pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""}) - _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } @@ -438,7 +438,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { }, } - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) } @@ -501,7 +501,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") for i := 0; i < maxPVs; i++ { - _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i]) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i], metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolume %d: %v", i, err) } @@ -509,7 +509,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { } t.Log("volumes created") - _, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -598,7 +598,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // with >3000 volumes. go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i]) + _, _ = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i], metav1.CreateOptions{}) } }() // Wait for them to get Available @@ -631,7 +631,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } else { pv.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int()) } - _, err = testClient.CoreV1().PersistentVolumes().Update(context.TODO(), pv) + _, err = testClient.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}) if err != nil { // Silently ignore error, the PV may have been updated by // the controller. 
@@ -655,7 +655,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } else { pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int()) } - _, err = testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Update(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Update(context.TODO(), pvc, metav1.UpdateOptions{}) if err != nil { // Silently ignore error, the PVC may have been updated by // the controller. @@ -677,7 +677,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Create the claims, again in a separate goroutine. go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i]) + _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i], metav1.CreateOptions{}) } }() @@ -746,13 +746,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") pvc.Annotations = map[string]string{"annBindCompleted": ""} pvc.Spec.VolumeName = pvName - newPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + newPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Cannot create claim %q: %v", pvc.Name, err) } // Save Bound status as a separate transaction newPVC.Status.Phase = v1.ClaimBound - newPVC, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).UpdateStatus(context.TODO(), newPVC) + newPVC, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).UpdateStatus(context.TODO(), newPVC, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err) } @@ -770,13 +770,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { return } pv.Spec.ClaimRef = claimRef - newPV, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) + newPV, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { t.Fatalf("Cannot create volume %q: %v", pv.Name, err) } // Save Bound status as a separate transaction newPV.Status.Phase = v1.VolumeBound - newPV, err = testClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), newPV) + newPV, err = testClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), newPV, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Cannot update volume status %q: %v", pv.Name, err) } @@ -874,7 +874,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { }, Provisioner: provisionerPluginName, } - testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass) + testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass, metav1.CreateOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -893,7 +893,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // early. It gets stuck with >3000 claims. 
go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i]) + _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i], metav1.CreateOptions{}) } }() @@ -972,17 +972,17 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwm) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwm, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwo) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwo, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } t.Log("volumes created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go index fe53e168447..22d81603724 100644 --- a/test/integration/volumescheduling/volume_binding_test.go +++ b/test/integration/volumescheduling/volume_binding_test.go @@ -195,7 +195,7 @@ func TestVolumeBinding(t *testing.T) { classes[classImmediate] = makeStorageClass(fmt.Sprintf("immediate-%v", suffix), &modeImmediate) classes[classWait] = makeStorageClass(fmt.Sprintf("wait-%v", suffix), &modeWait) for _, sc := range classes { - if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } @@ -203,14 +203,14 @@ func TestVolumeBinding(t *testing.T) { // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } for _, pvConfig := range test.unboundPvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -232,19 +232,19 @@ func TestVolumeBinding(t *testing.T) { // Create PVCs for _, pvcConfig := range test.pvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed 
to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.unboundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } if test.shouldFail { @@ -297,7 +297,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { sc := makeDynamicProvisionerStorageClass(storageClassName, &modeWait, nil) - if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } }, @@ -310,12 +310,12 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { sc := makeStorageClass(storageClassName, &modeWait) - if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } // Create pv for this class to mock static provisioner behavior. 
pv := makePV("pv-reschedule-onclassadd-static", storageClassName, "", "", node1) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } }, @@ -332,7 +332,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { pvc := makePVC("pvc-reschedule-onpvcadd", config.ns, &classWait, "") - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } }, @@ -350,7 +350,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { // Create unbound pvc for _, pvcConfig := range test.pvcs { pvc := makePVC(pvcConfig.name, config.ns, &storageClassName, "") - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } @@ -358,13 +358,13 @@ func TestVolumeBindingRescheduling(t *testing.T) { // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, sharedClasses[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } // Create pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } @@ -434,7 +434,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, if dynamic { scName = &classDynamic sc := makeDynamicProvisionerStorageClass(*scName, &modeWait, nil) - if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } @@ -460,7 +460,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, // static prebound pvs pv = makePV(pvName, classImmediate, pvcName, config.ns, node1) } - if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } pvs[i] = pv @@ -470,7 +470,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, } else { pvc = makePVC(pvcName, config.ns, scName, "") } - if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if pvc, err := 
config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } pvcs[i] = pvc @@ -486,7 +486,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, } pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, podPvcs) - if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { + if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } pods[i] = pod @@ -528,7 +528,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n // Create PVs for the first node for i := 0; i < numPVsFirstNode; i++ { pv := makePV(fmt.Sprintf("pv-node1-%v", i), classWait, "", "", node1) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -536,7 +536,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n // Create 1 PV per Node for the remaining nodes for i := 2; i <= numNodes; i++ { pv := makePV(fmt.Sprintf("pv-node%v-0", i), classWait, "", "", fmt.Sprintf("node-%v", i)) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -545,7 +545,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n for i := 0; i < numPods; i++ { // Create one pvc per pod pvc := makePVC(fmt.Sprintf("pvc-%v", i), config.ns, &classWait, "") - if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } pvcs = append(pvcs, pvc) @@ -577,7 +577,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n } } - if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { + if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } pods = append(pods, pod) @@ -648,12 +648,12 @@ func TestPVAffinityConflict(t *testing.T) { pvc := makePVC("local-pvc", config.ns, &classImmediate, "") // Create PV - if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } // Create PVC - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } @@ -671,7 +671,7 @@ 
func TestPVAffinityConflict(t *testing.T) { pod := makePod(podName, config.ns, []string{"local-pvc"}) nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2") // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } // Give time to shceduler to attempt to schedule pod @@ -768,14 +768,14 @@ func TestVolumeProvision(t *testing.T) { } classes[classTopoMismatch] = makeDynamicProvisionerStorageClass(fmt.Sprintf("topomismatch-%v", suffix), &modeWait, topo) for _, sc := range classes { - if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -783,25 +783,25 @@ func TestVolumeProvision(t *testing.T) { // Create PVCs for _, pvcConfig := range test.boundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.unboundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.provisionedPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } if test.shouldFail { @@ -859,14 +859,14 @@ func TestRescheduleProvisioning(t *testing.T) { // Prepare node and storage class. 
testNode := makeNode(0) - if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode); err != nil { + if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } scName := "fail-provision" sc := makeDynamicProvisionerStorageClass(scName, &modeWait, nil) // Expect the storage class fail to provision. sc.Parameters[volumetest.ExpectProvisionFailureKey] = "" - if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } @@ -874,7 +874,7 @@ func TestRescheduleProvisioning(t *testing.T) { pvcName := "pvc-fail-to-provision" pvc := makePVC(pvcName, ns, &scName, "") pvc.Annotations = map[string]string{"volume.kubernetes.io/selected-node": node1} - pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) + pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } @@ -913,14 +913,14 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t // Create nodes for i := 0; i < numberOfNodes; i++ { testNode := makeNode(i) - if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode); err != nil { + if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } } // Create SCs for _, sc := range sharedClasses { - if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { + if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 9ba0cb14457..3071e0ff9ec 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -116,7 +116,7 @@ func main() { queries := *queriesAverage * len(nodes.Items) * *podsPerNode // Create the namespace - got, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}}) + got, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}}, metav1.CreateOptions{}) if err != nil { klog.Fatalf("Failed to create namespace: %v", err) } @@ -161,7 +161,7 @@ func main() { "name": "serve-hostname", }, }, - }) + }, metav1.CreateOptions{}) klog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break @@ -211,7 +211,7 @@ func main() { }, NodeName: node.Name, }, - }) + }, metav1.CreateOptions{}) klog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break diff --git a/test/utils/create_resources.go b/test/utils/create_resources.go index 7f8a5146761..51b92ace88f 100644 --- a/test/utils/create_resources.go +++ b/test/utils/create_resources.go @@ -27,6 +27,7 @@ import ( batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -69,7 +70,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -86,7 +87,7 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -103,7 +104,7 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj) + _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -120,7 +121,7 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj) + _, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -137,7 +138,7 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj) + _, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -154,7 +155,7 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj) + _, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -171,7 +172,7 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -188,7 +189,7 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1 return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, 
metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -205,7 +206,7 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -222,7 +223,7 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -239,7 +240,7 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj) + _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -256,7 +257,7 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj) + _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go index a09a24f3f74..aedf76c8814 100644 --- a/test/utils/density_utils.go +++ b/test/utils/density_utils.go @@ -43,7 +43,7 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString) var err error for attempt := 0; attempt < retries; attempt++ { - _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch)) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) if err != nil { if !apierrors.IsConflict(err) { return err @@ -75,7 +75,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri } delete(node.Labels, labelKey) } - _, err = c.CoreV1().Nodes().Update(context.TODO(), node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { if !apierrors.IsConflict(err) { return err diff --git a/test/utils/deployment.go b/test/utils/deployment.go index f6e968a3639..a9f982affe6 100644 --- a/test/utils/deployment.go +++ b/test/utils/deployment.go @@ -265,7 +265,7 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(deployment) - if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment); err == nil { + if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); err == nil { logf("Updating deployment %s", name) return true, nil } diff --git a/test/utils/replicaset.go b/test/utils/replicaset.go index 60ae2a28078..625d6c7cc83 100644 --- a/test/utils/replicaset.go +++ b/test/utils/replicaset.go @@ -40,7 +40,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) - if rs, err = c.AppsV1().ReplicaSets(namespace).Update(context.TODO(), rs); err == nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).Update(context.TODO(), rs, metav1.UpdateOptions{}); err == nil { logf("Updating replica set %q", name) return true, nil } @@ -78,7 +78,7 @@ func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name st } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) - if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs); err == nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err == nil { logf("Updating replica set %q", name) return true, nil } diff --git a/test/utils/runners.go b/test/utils/runners.go index f3ba0caeecd..e53e8b8e58e 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -1088,7 +1088,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d) } - _, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode) + _, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // Something created CSINode instance after we checked it did not exist. 
// Make the caller to re-try PrepareDependentObjects by returning Conflict error @@ -1118,7 +1118,7 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, } csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",") - _, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode) + _, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{}) return err } @@ -1194,7 +1194,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo return nil } for attempt := 0; attempt < retries; attempt++ { - if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch)); err == nil { + if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil { break } if !apierrors.IsConflict(err) { @@ -1232,7 +1232,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare if apiequality.Semantic.DeepEqual(node, updatedNode) { return nil } - if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode); err == nil { + if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil { break } if !apierrors.IsConflict(err) { From 1dcd1fb3b7a4399bdfd1aeaa8d200ae899a30b27 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 4 Feb 2020 17:53:04 -0800 Subject: [PATCH 3/4] generated: update clients --- .../versioned/typed/cr/v1/example.go | 23 +++++++------ .../typed/cr/v1/fake/fake_example.go | 6 ++-- .../v1/customresourcedefinition.go | 29 +++++++++------- .../v1/fake/fake_customresourcedefinition.go | 8 ++--- .../v1beta1/customresourcedefinition.go | 29 +++++++++------- .../fake/fake_customresourcedefinition.go | 8 ++--- .../fake/fake_mutatingwebhookconfiguration.go | 6 ++-- .../fake_validatingwebhookconfiguration.go | 6 ++-- .../v1/mutatingwebhookconfiguration.go | 23 +++++++------ .../v1/validatingwebhookconfiguration.go | 23 +++++++------ .../fake/fake_mutatingwebhookconfiguration.go | 6 ++-- .../fake_validatingwebhookconfiguration.go | 6 ++-- .../v1beta1/mutatingwebhookconfiguration.go | 23 +++++++------ .../v1beta1/validatingwebhookconfiguration.go | 23 +++++++------ .../typed/apps/v1/controllerrevision.go | 23 +++++++------ .../kubernetes/typed/apps/v1/daemonset.go | 29 +++++++++------- .../kubernetes/typed/apps/v1/deployment.go | 34 +++++++++++-------- .../apps/v1/fake/fake_controllerrevision.go | 6 ++-- .../typed/apps/v1/fake/fake_daemonset.go | 8 ++--- .../typed/apps/v1/fake/fake_deployment.go | 10 +++--- .../typed/apps/v1/fake/fake_replicaset.go | 10 +++--- .../typed/apps/v1/fake/fake_statefulset.go | 10 +++--- .../kubernetes/typed/apps/v1/replicaset.go | 34 +++++++++++-------- .../kubernetes/typed/apps/v1/statefulset.go | 34 +++++++++++-------- .../typed/apps/v1beta1/controllerrevision.go | 23 +++++++------ .../typed/apps/v1beta1/deployment.go | 29 +++++++++------- .../v1beta1/fake/fake_controllerrevision.go | 6 ++-- .../apps/v1beta1/fake/fake_deployment.go | 8 ++--- .../apps/v1beta1/fake/fake_statefulset.go | 8 ++--- .../typed/apps/v1beta1/statefulset.go | 29 +++++++++------- .../typed/apps/v1beta2/controllerrevision.go | 23 +++++++------ .../typed/apps/v1beta2/daemonset.go | 29 +++++++++------- .../typed/apps/v1beta2/deployment.go | 29 +++++++++------- .../v1beta2/fake/fake_controllerrevision.go | 6 ++-- .../typed/apps/v1beta2/fake/fake_daemonset.go | 8 ++--- 
.../apps/v1beta2/fake/fake_deployment.go | 8 ++--- .../apps/v1beta2/fake/fake_replicaset.go | 8 ++--- .../apps/v1beta2/fake/fake_statefulset.go | 10 +++--- .../typed/apps/v1beta2/replicaset.go | 29 +++++++++------- .../typed/apps/v1beta2/statefulset.go | 34 +++++++++++-------- .../auditregistration/v1alpha1/auditsink.go | 23 +++++++------ .../v1alpha1/fake/fake_auditsink.go | 6 ++-- .../kubernetes/typed/authentication/v1/BUILD | 1 + .../typed/authentication/v1/fake/BUILD | 1 + .../v1/fake/fake_tokenreview.go | 3 +- .../typed/authentication/v1/tokenreview.go | 7 ++-- .../typed/authentication/v1beta1/BUILD | 1 + .../typed/authentication/v1beta1/fake/BUILD | 1 + .../v1beta1/fake/fake_tokenreview.go | 3 +- .../authentication/v1beta1/tokenreview.go | 7 ++-- .../kubernetes/typed/authorization/v1/BUILD | 1 + .../typed/authorization/v1/fake/BUILD | 1 + .../v1/fake/fake_localsubjectaccessreview.go | 3 +- .../v1/fake/fake_selfsubjectaccessreview.go | 3 +- .../v1/fake/fake_selfsubjectrulesreview.go | 3 +- .../v1/fake/fake_subjectaccessreview.go | 3 +- .../v1/localsubjectaccessreview.go | 7 ++-- .../v1/selfsubjectaccessreview.go | 7 ++-- .../v1/selfsubjectrulesreview.go | 7 ++-- .../authorization/v1/subjectaccessreview.go | 7 ++-- .../typed/authorization/v1beta1/BUILD | 1 + .../typed/authorization/v1beta1/fake/BUILD | 1 + .../fake/fake_localsubjectaccessreview.go | 3 +- .../fake/fake_selfsubjectaccessreview.go | 3 +- .../fake/fake_selfsubjectrulesreview.go | 3 +- .../v1beta1/fake/fake_subjectaccessreview.go | 3 +- .../v1beta1/localsubjectaccessreview.go | 7 ++-- .../v1beta1/selfsubjectaccessreview.go | 7 ++-- .../v1beta1/selfsubjectrulesreview.go | 7 ++-- .../v1beta1/subjectaccessreview.go | 7 ++-- .../v1/fake/fake_horizontalpodautoscaler.go | 8 ++--- .../autoscaling/v1/horizontalpodautoscaler.go | 29 +++++++++------- .../fake/fake_horizontalpodautoscaler.go | 8 ++--- .../v2beta1/horizontalpodautoscaler.go | 29 +++++++++------- .../fake/fake_horizontalpodautoscaler.go | 8 ++--- .../v2beta2/horizontalpodautoscaler.go | 29 +++++++++------- .../typed/batch/v1/fake/fake_job.go | 8 ++--- .../kubernetes/typed/batch/v1/job.go | 29 +++++++++------- .../kubernetes/typed/batch/v1beta1/cronjob.go | 29 +++++++++------- .../typed/batch/v1beta1/fake/fake_cronjob.go | 8 ++--- .../typed/batch/v2alpha1/cronjob.go | 29 +++++++++------- .../typed/batch/v2alpha1/fake/fake_cronjob.go | 8 ++--- .../v1beta1/certificatesigningrequest.go | 29 +++++++++------- .../fake/fake_certificatesigningrequest.go | 8 ++--- .../typed/coordination/v1/fake/fake_lease.go | 6 ++-- .../kubernetes/typed/coordination/v1/lease.go | 23 +++++++------ .../coordination/v1beta1/fake/fake_lease.go | 6 ++-- .../typed/coordination/v1beta1/lease.go | 23 +++++++------ .../typed/core/v1/componentstatus.go | 23 +++++++------ .../kubernetes/typed/core/v1/configmap.go | 23 +++++++------ .../kubernetes/typed/core/v1/endpoints.go | 23 +++++++------ .../kubernetes/typed/core/v1/event.go | 23 +++++++------ .../core/v1/fake/fake_componentstatus.go | 6 ++-- .../typed/core/v1/fake/fake_configmap.go | 6 ++-- .../typed/core/v1/fake/fake_endpoints.go | 6 ++-- .../typed/core/v1/fake/fake_event.go | 6 ++-- .../typed/core/v1/fake/fake_limitrange.go | 6 ++-- .../typed/core/v1/fake/fake_namespace.go | 8 ++--- .../typed/core/v1/fake/fake_node.go | 8 ++--- .../core/v1/fake/fake_persistentvolume.go | 8 ++--- .../v1/fake/fake_persistentvolumeclaim.go | 8 ++--- .../kubernetes/typed/core/v1/fake/fake_pod.go | 10 +++--- .../typed/core/v1/fake/fake_podtemplate.go | 6 ++-- 
.../v1/fake/fake_replicationcontroller.go | 10 +++--- .../typed/core/v1/fake/fake_resourcequota.go | 8 ++--- .../typed/core/v1/fake/fake_secret.go | 6 ++-- .../typed/core/v1/fake/fake_service.go | 8 ++--- .../typed/core/v1/fake/fake_serviceaccount.go | 8 ++--- .../kubernetes/typed/core/v1/limitrange.go | 23 +++++++------ .../kubernetes/typed/core/v1/namespace.go | 27 ++++++++------- .../kubernetes/typed/core/v1/node.go | 29 +++++++++------- .../typed/core/v1/persistentvolume.go | 29 +++++++++------- .../typed/core/v1/persistentvolumeclaim.go | 29 +++++++++------- .../client-go/kubernetes/typed/core/v1/pod.go | 34 +++++++++++-------- .../kubernetes/typed/core/v1/podtemplate.go | 23 +++++++------ .../typed/core/v1/replicationcontroller.go | 34 +++++++++++-------- .../kubernetes/typed/core/v1/resourcequota.go | 29 +++++++++------- .../kubernetes/typed/core/v1/secret.go | 23 +++++++------ .../kubernetes/typed/core/v1/service.go | 27 ++++++++------- .../typed/core/v1/serviceaccount.go | 28 ++++++++------- .../typed/discovery/v1alpha1/endpointslice.go | 23 +++++++------ .../v1alpha1/fake/fake_endpointslice.go | 6 ++-- .../typed/discovery/v1beta1/endpointslice.go | 23 +++++++------ .../v1beta1/fake/fake_endpointslice.go | 6 ++-- .../kubernetes/typed/events/v1beta1/event.go | 23 +++++++------ .../typed/events/v1beta1/fake/fake_event.go | 6 ++-- .../typed/extensions/v1beta1/daemonset.go | 29 +++++++++------- .../typed/extensions/v1beta1/deployment.go | 34 +++++++++++-------- .../extensions/v1beta1/fake/fake_daemonset.go | 8 ++--- .../v1beta1/fake/fake_deployment.go | 10 +++--- .../extensions/v1beta1/fake/fake_ingress.go | 8 ++--- .../v1beta1/fake/fake_networkpolicy.go | 6 ++-- .../v1beta1/fake/fake_podsecuritypolicy.go | 6 ++-- .../v1beta1/fake/fake_replicaset.go | 10 +++--- .../typed/extensions/v1beta1/ingress.go | 29 +++++++++------- .../typed/extensions/v1beta1/networkpolicy.go | 23 +++++++------ .../extensions/v1beta1/podsecuritypolicy.go | 23 +++++++------ .../typed/extensions/v1beta1/replicaset.go | 34 +++++++++++-------- .../v1alpha1/fake/fake_flowschema.go | 8 ++--- .../fake/fake_prioritylevelconfiguration.go | 8 ++--- .../typed/flowcontrol/v1alpha1/flowschema.go | 29 +++++++++------- .../v1alpha1/prioritylevelconfiguration.go | 29 +++++++++------- .../networking/v1/fake/fake_networkpolicy.go | 6 ++-- .../typed/networking/v1/networkpolicy.go | 23 +++++++------ .../networking/v1beta1/fake/fake_ingress.go | 8 ++--- .../typed/networking/v1beta1/ingress.go | 29 +++++++++------- .../node/v1alpha1/fake/fake_runtimeclass.go | 6 ++-- .../typed/node/v1alpha1/runtimeclass.go | 23 +++++++------ .../node/v1beta1/fake/fake_runtimeclass.go | 6 ++-- .../typed/node/v1beta1/runtimeclass.go | 23 +++++++------ .../v1beta1/fake/fake_poddisruptionbudget.go | 8 ++--- .../v1beta1/fake/fake_podsecuritypolicy.go | 6 ++-- .../policy/v1beta1/poddisruptionbudget.go | 29 +++++++++------- .../typed/policy/v1beta1/podsecuritypolicy.go | 23 +++++++------ .../kubernetes/typed/rbac/v1/clusterrole.go | 23 +++++++------ .../typed/rbac/v1/clusterrolebinding.go | 23 +++++++------ .../typed/rbac/v1/fake/fake_clusterrole.go | 6 ++-- .../rbac/v1/fake/fake_clusterrolebinding.go | 6 ++-- .../typed/rbac/v1/fake/fake_role.go | 6 ++-- .../typed/rbac/v1/fake/fake_rolebinding.go | 6 ++-- .../kubernetes/typed/rbac/v1/role.go | 23 +++++++------ .../kubernetes/typed/rbac/v1/rolebinding.go | 23 +++++++------ .../typed/rbac/v1alpha1/clusterrole.go | 23 +++++++------ .../typed/rbac/v1alpha1/clusterrolebinding.go | 23 +++++++------ 
.../rbac/v1alpha1/fake/fake_clusterrole.go | 6 ++-- .../v1alpha1/fake/fake_clusterrolebinding.go | 6 ++-- .../typed/rbac/v1alpha1/fake/fake_role.go | 6 ++-- .../rbac/v1alpha1/fake/fake_rolebinding.go | 6 ++-- .../kubernetes/typed/rbac/v1alpha1/role.go | 23 +++++++------ .../typed/rbac/v1alpha1/rolebinding.go | 23 +++++++------ .../typed/rbac/v1beta1/clusterrole.go | 23 +++++++------ .../typed/rbac/v1beta1/clusterrolebinding.go | 23 +++++++------ .../rbac/v1beta1/fake/fake_clusterrole.go | 6 ++-- .../v1beta1/fake/fake_clusterrolebinding.go | 6 ++-- .../typed/rbac/v1beta1/fake/fake_role.go | 6 ++-- .../rbac/v1beta1/fake/fake_rolebinding.go | 6 ++-- .../kubernetes/typed/rbac/v1beta1/role.go | 23 +++++++------ .../typed/rbac/v1beta1/rolebinding.go | 23 +++++++------ .../scheduling/v1/fake/fake_priorityclass.go | 6 ++-- .../typed/scheduling/v1/priorityclass.go | 23 +++++++------ .../v1alpha1/fake/fake_priorityclass.go | 6 ++-- .../scheduling/v1alpha1/priorityclass.go | 23 +++++++------ .../v1beta1/fake/fake_priorityclass.go | 6 ++-- .../typed/scheduling/v1beta1/priorityclass.go | 23 +++++++------ .../settings/v1alpha1/fake/fake_podpreset.go | 6 ++-- .../typed/settings/v1alpha1/podpreset.go | 23 +++++++------ .../kubernetes/typed/storage/v1/csinode.go | 23 +++++++------ .../typed/storage/v1/fake/fake_csinode.go | 6 ++-- .../storage/v1/fake/fake_storageclass.go | 6 ++-- .../storage/v1/fake/fake_volumeattachment.go | 8 ++--- .../typed/storage/v1/storageclass.go | 23 +++++++------ .../typed/storage/v1/volumeattachment.go | 29 +++++++++------- .../v1alpha1/fake/fake_volumeattachment.go | 8 ++--- .../storage/v1alpha1/volumeattachment.go | 29 +++++++++------- .../typed/storage/v1beta1/csidriver.go | 23 +++++++------ .../typed/storage/v1beta1/csinode.go | 23 +++++++------ .../storage/v1beta1/fake/fake_csidriver.go | 6 ++-- .../storage/v1beta1/fake/fake_csinode.go | 6 ++-- .../storage/v1beta1/fake/fake_storageclass.go | 6 ++-- .../v1beta1/fake/fake_volumeattachment.go | 8 ++--- .../typed/storage/v1beta1/storageclass.go | 23 +++++++------ .../typed/storage/v1beta1/volumeattachment.go | 29 +++++++++------- .../typed/example/v1/clustertesttype.go | 34 +++++++++++-------- .../example/v1/fake/fake_clustertesttype.go | 10 +++--- .../typed/example/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example/v1/testtype.go | 29 +++++++++------- .../typed/example/v1/clustertesttype.go | 34 +++++++++++-------- .../example/v1/fake/fake_clustertesttype.go | 10 +++--- .../typed/example/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example/v1/testtype.go | 29 +++++++++------- .../internalversion/fake/fake_testtype.go | 8 ++--- .../typed/example/internalversion/testtype.go | 29 +++++++++------- .../internalversion/fake/fake_testtype.go | 8 ++--- .../example2/internalversion/testtype.go | 29 +++++++++------- .../internalversion/fake/fake_testtype.go | 8 ++--- .../example3.io/internalversion/testtype.go | 29 +++++++++------- .../typed/example/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example/v1/testtype.go | 29 +++++++++------- .../typed/example2/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example2/v1/testtype.go | 29 +++++++++------- .../example3.io/v1/fake/fake_testtype.go | 8 ++--- .../typed/example3.io/v1/testtype.go | 29 +++++++++------- .../typed/example/v1/clustertesttype.go | 34 +++++++++++-------- .../example/v1/fake/fake_clustertesttype.go | 10 +++--- .../typed/example/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example/v1/testtype.go | 29 +++++++++------- 
.../typed/example2/v1/fake/fake_testtype.go | 8 ++--- .../versioned/typed/example2/v1/testtype.go | 29 +++++++++------- .../typed/apiregistration/v1/apiservice.go | 29 +++++++++------- .../v1/fake/fake_apiservice.go | 8 ++--- .../apiregistration/v1beta1/apiservice.go | 29 +++++++++------- .../v1beta1/fake/fake_apiservice.go | 8 ++--- .../typed/metrics/v1alpha1/nodemetrics.go | 2 +- .../typed/metrics/v1alpha1/podmetrics.go | 2 +- .../typed/metrics/v1beta1/nodemetrics.go | 2 +- .../typed/metrics/v1beta1/podmetrics.go | 2 +- .../wardle/v1alpha1/fake/fake_fischer.go | 6 ++-- .../wardle/v1alpha1/fake/fake_flunder.go | 8 ++--- .../typed/wardle/v1alpha1/fischer.go | 23 +++++++------ .../typed/wardle/v1alpha1/flunder.go | 29 +++++++++------- .../typed/wardle/v1beta1/fake/fake_flunder.go | 8 ++--- .../versioned/typed/wardle/v1beta1/flunder.go | 29 +++++++++------- .../v1alpha1/fake/fake_foo.go | 8 ++--- .../typed/samplecontroller/v1alpha1/foo.go | 29 +++++++++------- 244 files changed, 2048 insertions(+), 1670 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go index b5c028dabed..e1d26401af0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go @@ -38,14 +38,14 @@ type ExamplesGetter interface { // ExampleInterface has methods to work with Example resources. type ExampleInterface interface { - Create(context.Context, *v1.Example) (*v1.Example, error) - Update(context.Context, *v1.Example) (*v1.Example, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Example, error) + Create(ctx context.Context, example *v1.Example, opts metav1.CreateOptions) (*v1.Example, error) + Update(ctx context.Context, example *v1.Example, opts metav1.UpdateOptions) (*v1.Example, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Example, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ExampleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Example, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Example, err error) ExampleExpansion } @@ -109,11 +109,12 @@ func (c *examples) Watch(ctx context.Context, opts metav1.ListOptions) (watch.In } // Create takes the representation of a example and creates it. Returns the server's representation of the example, and an error, if there is any. 
-func (c *examples) Create(ctx context.Context, example *v1.Example) (result *v1.Example, err error) { +func (c *examples) Create(ctx context.Context, example *v1.Example, opts metav1.CreateOptions) (result *v1.Example, err error) { result = &v1.Example{} err = c.client.Post(). Namespace(c.ns). Resource("examples"). + VersionedParams(&opts, scheme.ParameterCodec). Body(example). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *examples) Create(ctx context.Context, example *v1.Example) (result *v1. } // Update takes the representation of a example and updates it. Returns the server's representation of the example, and an error, if there is any. -func (c *examples) Update(ctx context.Context, example *v1.Example) (result *v1.Example, err error) { +func (c *examples) Update(ctx context.Context, example *v1.Example, opts metav1.UpdateOptions) (result *v1.Example, err error) { result = &v1.Example{} err = c.client.Put(). Namespace(c.ns). Resource("examples"). Name(example.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(example). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *examples) DeleteCollection(ctx context.Context, options *metav1.DeleteO } // Patch applies the patch and returns the patched example. -func (c *examples) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Example, err error) { +func (c *examples) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Example, err error) { result = &v1.Example{} err = c.client.Patch(pt). Namespace(c.ns). Resource("examples"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go index 5d8ce1be7a4..ef48d9b1835 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go @@ -81,7 +81,7 @@ func (c *FakeExamples) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a example and creates it. Returns the server's representation of the example, and an error, if there is any. -func (c *FakeExamples) Create(ctx context.Context, example *crv1.Example) (result *crv1.Example, err error) { +func (c *FakeExamples) Create(ctx context.Context, example *crv1.Example, opts v1.CreateOptions) (result *crv1.Example, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(examplesResource, c.ns, example), &crv1.Example{}) @@ -92,7 +92,7 @@ func (c *FakeExamples) Create(ctx context.Context, example *crv1.Example) (resul } // Update takes the representation of a example and updates it. Returns the server's representation of the example, and an error, if there is any. -func (c *FakeExamples) Update(ctx context.Context, example *crv1.Example) (result *crv1.Example, err error) { +func (c *FakeExamples) Update(ctx context.Context, example *crv1.Example, opts v1.UpdateOptions) (result *crv1.Example, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(examplesResource, c.ns, example), &crv1.Example{}) @@ -119,7 +119,7 @@ func (c *FakeExamples) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched example. -func (c *FakeExamples) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *crv1.Example, err error) { +func (c *FakeExamples) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *crv1.Example, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(examplesResource, c.ns, name, pt, data, subresources...), &crv1.Example{}) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go index 1ec9f44a3cc..e74d657e4fc 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -38,15 +38,15 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. type CustomResourceDefinitionInterface interface { - Create(context.Context, *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) - Update(context.Context, *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) - UpdateStatus(context.Context, *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } @@ -105,10 +105,11 @@ func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListO } // Create takes the representation of a customResourceDefinition and creates it. 
Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { result = &v1.CustomResourceDefinition{} err = c.client.Post(). Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDe } // Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { result = &v1.CustomResourceDefinition{} err = c.client.Put(). Resource("customresourcedefinitions"). Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDe // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { result = &v1.CustomResourceDefinition{} err = c.client.Put(). Resource("customresourcedefinitions"). Name(customResourceDefinition.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, option } // Patch applies the patch and returns the patched customResourceDefinition. -func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { result = &v1.CustomResourceDefinition{} err = c.client.Patch(pt). Resource("customresourcedefinitions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go index f0d669b5709..4b03e2fa3cb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go @@ -77,7 +77,7 @@ func (c *FakeCustomResourceDefinitions) Watch(ctx context.Context, opts v1.ListO } // Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition) (result *apiextensionsv1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts v1.CreateOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(customresourcedefinitionsResource, customResourceDefinition), &apiextensionsv1.CustomResourceDefinition{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResour } // Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition) (result *apiextensionsv1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts v1.UpdateOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(customresourcedefinitionsResource, customResourceDefinition), &apiextensionsv1.CustomResourceDefinition{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResour // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, error) { +func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts v1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(customresourcedefinitionsResource, "status", customResourceDefinition), &apiextensionsv1.CustomResourceDefinition{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeCustomResourceDefinitions) DeleteCollection(ctx context.Context, op } // Patch applies the patch and returns the patched customResourceDefinition. 
-func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, name, pt, data, subresources...), &apiextensionsv1.CustomResourceDefinition{}) if obj == nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index f39da7a0eed..4bf50bcfb47 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -38,15 +38,15 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. type CustomResourceDefinitionInterface interface { - Create(context.Context, *v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) - Update(context.Context, *v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) - UpdateStatus(context.Context, *v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } @@ -105,10 +105,11 @@ func (c *customResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptio } // Create takes the representation of a customResourceDefinition and creates it. 
Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { result = &v1beta1.CustomResourceDefinition{} err = c.client.Post(). Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDe } // Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { result = &v1beta1.CustomResourceDefinition{} err = c.client.Put(). Resource("customresourcedefinitions"). Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDe // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { result = &v1beta1.CustomResourceDefinition{} err = c.client.Put(). Resource("customresourcedefinitions"). Name(customResourceDefinition.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(customResourceDefinition). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, option } // Patch applies the patch and returns the patched customResourceDefinition. -func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { result = &v1beta1.CustomResourceDefinition{} err = c.client.Patch(pt). Resource("customresourcedefinitions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go index c5f42bd29ad..a6b5e7f60a2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go @@ -77,7 +77,7 @@ func (c *FakeCustomResourceDefinitions) Watch(ctx context.Context, opts v1.ListO } // Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(customresourcedefinitionsResource, customResourceDefinition), &v1beta1.CustomResourceDefinition{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResour } // Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(customresourcedefinitionsResource, customResourceDefinition), &v1beta1.CustomResourceDefinition{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResour // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) { +func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(customresourcedefinitionsResource, "status", customResourceDefinition), &v1beta1.CustomResourceDefinition{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeCustomResourceDefinitions) DeleteCollection(ctx context.Context, op } // Patch applies the patch and returns the patched customResourceDefinition. 
-func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { +func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, name, pt, data, subresources...), &v1beta1.CustomResourceDefinition{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index 629ea009117..3ddf59ee8b0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -77,7 +77,7 @@ func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.L } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutating } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
-func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index dba9e0b67d8..88fddd04b4b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -77,7 +77,7 @@ func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1 } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, valida } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte } // Patch applies the patch and returns the patched validatingWebhookConfiguration. 
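The fake clients pick up the same signatures, so tests compile against the new interface while the recorded testing actions stay as generated here. A minimal sketch of a unit test against the fake clientset (object and test names are illustrative):

    package example

    import (
        "context"
        "testing"

        admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestCreateWebhookConfig(t *testing.T) {
        cs := fake.NewSimpleClientset()
        cfg := &admissionregistrationv1.MutatingWebhookConfiguration{
            ObjectMeta: metav1.ObjectMeta{Name: "example-webhook"},
        }
        // The opts argument is accepted by the fake but, as generated in this
        // change, is not forwarded into the recorded create action.
        if _, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().
            Create(context.TODO(), cfg, metav1.CreateOptions{}); err != nil {
            t.Fatal(err)
        }
    }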
-func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index be59ad6a36c..e9787bf408f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -38,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. type MutatingWebhookConfigurationInterface interface { - Create(context.Context, *v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) - Update(context.Context, *v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } @@ -104,10 +104,11 @@ func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.L } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
-func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Post(). Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebh } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Put(). Resource("mutatingwebhookconfigurations"). Name(mutatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, op } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index c08d6a28f21..1a9c1b4ac60 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -38,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
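One practical use of the new UpdateOptions parameter is server-side dry run, since VersionedParams now encodes the options into the request. A hedged sketch, assuming a `client` kubernetes clientset, a previously fetched `cfg`, and an API server that supports dry-run for this resource:

    package example

    import (
        "context"

        admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // dryRunUpdate asks the server to validate the update without persisting it.
    func dryRunUpdate(ctx context.Context, client kubernetes.Interface, cfg *admissionregistrationv1.MutatingWebhookConfiguration) error {
        _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(
            ctx, cfg, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
        return err
    }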
type ValidatingWebhookConfigurationInterface interface { - Create(context.Context, *v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) - Update(context.Context, *v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } @@ -104,10 +104,11 @@ func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1 } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Post(). Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *validatingWebhookConfigurations) Create(ctx context.Context, validating } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Put(). Resource("validatingwebhookconfigurations"). Name(validatingWebhookConfiguration.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, } // Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("validatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index c88c6d8e220..451de685ac6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -77,7 +77,7 @@ func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.L } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutating } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
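The Patch signature now takes PatchOptions ahead of the variadic subresources. A minimal caller-side sketch (names and the merge patch payload are illustrative, not part of this patch):

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    // labelWebhookConfig applies a merge patch; an empty PatchOptions keeps the
    // previous behavior, and subresources remain optional trailing arguments.
    func labelWebhookConfig(ctx context.Context, client kubernetes.Interface, name string) error {
        patch := []byte(`{"metadata":{"labels":{"team":"platform"}}}`)
        _, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Patch(
            ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
        return err
    }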
-func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 2faeacb9d4e..4bddb9ad2f1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -77,7 +77,7 @@ func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1 } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, valida } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte } // Patch applies the patch and returns the patched validatingWebhookConfiguration. 
-func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index d9976cd0784..4643325a393 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -38,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. type MutatingWebhookConfigurationInterface interface { - Create(context.Context, *v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Update(context.Context, *v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } @@ -104,10 +104,11 @@ func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListO } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
-func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Post(). Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebh } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Put(). Resource("mutatingwebhookconfigurations"). Name(mutatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, op } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 966b35d35b8..26af233d00f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -38,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
type ValidatingWebhookConfigurationInterface interface { - Create(context.Context, *v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Update(context.Context, *v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } @@ -104,10 +104,11 @@ func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.Lis } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Post(). Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *validatingWebhookConfigurations) Create(ctx context.Context, validating } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Put(). 
Resource("validatingwebhookconfigurations"). Name(validatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, } // Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("validatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index e7f6910be53..d3321e33aaf 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -38,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. type ControllerRevisionInterface interface { - Create(context.Context, *v1.ControllerRevision) (*v1.ControllerRevision, error) - Update(context.Context, *v1.ControllerRevision) (*v1.ControllerRevision, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) ControllerRevisionExpansion } @@ -109,11 +109,12 @@ func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
-func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1 } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *met } // Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index e2f6b7c1668..4c78281ece0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -38,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. 
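UpdateStatus gains the same UpdateOptions parameter as Update. A hedged sketch, assuming the DaemonSet was previously read from the server so its ResourceVersion is set:

    package example

    import (
        "context"

        appsv1 "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // touchDaemonSetStatus writes only the status subresource; the new options
    // argument rides along on the PUT to .../status.
    func touchDaemonSetStatus(ctx context.Context, client kubernetes.Interface, ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
        return client.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(ctx, ds, metav1.UpdateOptions{})
    }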
type DaemonSetInterface interface { - Create(context.Context, *v1.DaemonSet) (*v1.DaemonSet, error) - Update(context.Context, *v1.DaemonSet) (*v1.DaemonSet, error) - UpdateStatus(context.Context, *v1.DaemonSet) (*v1.DaemonSet, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.DaemonSet, error) + Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) DaemonSetExpansion } @@ -110,11 +110,12 @@ func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch. } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet) (resul } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet) (resul // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). 
Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *daemonSets) DeleteCollection(ctx context.Context, options *metav1.Delet } // Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index a9cf83ad093..c7c32a035b8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -39,17 +39,17 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(context.Context, *v1.Deployment) (*v1.Deployment, error) - Update(context.Context, *v1.Deployment) (*v1.Deployment, error) - UpdateStatus(context.Context, *v1.Deployment) (*v1.Deployment, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Deployment, error) + Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) + Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) DeploymentExpansion } @@ -114,11 +114,12 @@ func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch } // Create takes the representation of a deployment and creates it. 
Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -126,12 +127,13 @@ func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment) (re } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -140,14 +142,14 @@ func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment) (re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -182,13 +184,14 @@ func (c *deployments) DeleteCollection(ctx context.Context, options *metav1.Dele } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -210,13 +213,14 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deploymentName). SubResource("scale"). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index 3f55bd768da..b1ccd180903 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -81,7 +81,7 @@ func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.CreateOptions) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) @@ -92,7 +92,7 @@ func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.UpdateOptions) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &appsv1.ControllerRevision{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 53c473f5b4f..fc424954590 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -81,7 +81,7 @@ func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. 
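Subresource helpers such as UpdateScale also gain the options parameter. A caller-side sketch of the read-modify-write pattern for the Deployment scale subresource (function and variable names are placeholders):

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // scaleDeployment reads the scale subresource and writes it back through the
    // new UpdateScale signature with an explicit UpdateOptions argument.
    func scaleDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, replicas int32) error {
        scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas
        _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
        return err
    }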
-func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.CreateOptions) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -92,7 +92,7 @@ func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *appsv1.DaemonSet } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -104,7 +104,7 @@ func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *appsv1.DaemonSet // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet) (*appsv1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (*appsv1.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &appsv1.DaemonSet{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 813ce34ba9c..6b243acfe52 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -82,7 +82,7 @@ func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *appsv1.Deployment, opts v1.CreateOptions) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) @@ -93,7 +93,7 @@ func (c *FakeDeployments) Create(ctx context.Context, deployment *appsv1.Deploym } // Update takes the representation of a deployment and updates it. 
Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) @@ -105,7 +105,7 @@ func (c *FakeDeployments) Update(ctx context.Context, deployment *appsv1.Deploym // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *appsv1.Deployment) (*appsv1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (*appsv1.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &appsv1.Deployment{}) @@ -132,7 +132,7 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &appsv1.Deployment{}) @@ -154,7 +154,7 @@ func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 519dd2536ae..38c7400bad6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -82,7 +82,7 @@ func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.CreateOptions) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -93,7 +93,7 @@ func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *appsv1.Replica } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -105,7 +105,7 @@ func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *appsv1.Replica // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet) (*appsv1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (*appsv1.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -132,7 +132,7 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &appsv1.ReplicaSet{}) @@ -154,7 +154,7 @@ func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 79b3252eea7..2accba210d0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -82,7 +82,7 @@ func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
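Illustration, not part of the generated patch: a minimal sketch of how a test calls the fake clients once the extra options argument is in place, assuming the standard fake clientset from k8s.io/client-go/kubernetes/fake; the object name and namespace are made up.

package example

import (
	"context"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestCreateStatefulSet(t *testing.T) {
	// The fake clientset implements the same typed interfaces as the real one,
	// so callers pass metav1.CreateOptions exactly as they would against a server.
	client := fake.NewSimpleClientset()

	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	}

	created, err := client.AppsV1().StatefulSets("default").
		Create(context.TODO(), sts, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if created.Name != "web" {
		t.Fatalf("unexpected name %q", created.Name)
	}
}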
-func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.CreateOptions) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -93,7 +93,7 @@ func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *appsv1.State } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -105,7 +105,7 @@ func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *appsv1.State // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet) (*appsv1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (*appsv1.StatefulSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -132,7 +132,7 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.Del } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &appsv1.StatefulSet{}) @@ -154,7 +154,7 @@ func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 09e59063528..2b94003fb05 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -39,17 +39,17 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(context.Context, *v1.ReplicaSet) (*v1.ReplicaSet, error) - Update(context.Context, *v1.ReplicaSet) (*v1.ReplicaSet, error) - UpdateStatus(context.Context, *v1.ReplicaSet) (*v1.ReplicaSet, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ReplicaSetExpansion } @@ -114,11 +114,12 @@ func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -126,12 +127,13 @@ func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet) (re } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -140,14 +142,14 @@ func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet) (re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -182,13 +184,14 @@ func (c *replicaSets) DeleteCollection(ctx context.Context, options *metav1.Dele } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -210,13 +213,14 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 45196ac2291..7fd748cfe9f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -39,17 +39,17 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. 
type StatefulSetInterface interface { - Create(context.Context, *v1.StatefulSet) (*v1.StatefulSet, error) - Update(context.Context, *v1.StatefulSet) (*v1.StatefulSet, error) - UpdateStatus(context.Context, *v1.StatefulSet) (*v1.StatefulSet, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.StatefulSet, error) + Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) StatefulSetExpansion } @@ -114,11 +114,12 @@ func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watc } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Post(). Namespace(c.ns). Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -126,12 +127,13 @@ func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet) } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). 
Into(result) @@ -140,14 +142,14 @@ func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet) // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -182,13 +184,14 @@ func (c *statefulSets) DeleteCollection(ctx context.Context, options *metav1.Del } // Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -210,13 +213,14 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 9712b60f97c..d1833d03000 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -38,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
type ControllerRevisionInterface interface { - Create(context.Context, *v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Update(context.Context, *v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) ControllerRevisionExpansion } @@ -109,11 +109,12 @@ func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1 } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched controllerRevision. 
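Illustration, not part of the patch: a sketch of how calling code adapts to the new Patch signature in this interface, assuming a clientset constructed elsewhere; the revision name and label are hypothetical. The options struct is encoded into query parameters by the VersionedParams call added to the client below.

package example

import (
	"context"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func labelRevision(ctx context.Context, cs kubernetes.Interface) (*appsv1beta1.ControllerRevision, error) {
	// Merge-patch a label onto an existing ControllerRevision; metav1.PatchOptions
	// now sits before the optional subresource names in the variadic tail.
	patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
	return cs.AppsV1beta1().ControllerRevisions("default").
		Patch(ctx, "web-1", types.MergePatchType, patch, metav1.PatchOptions{})
}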
-func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 1701f19a4b3..64480f32e1f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -38,15 +38,15 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) + Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) DeploymentExpansion } @@ -110,11 +110,12 @@ func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). 
Into(result) @@ -122,12 +123,13 @@ func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 3d4e2908345..f005c6a7560 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -81,7 +81,7 @@ func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) @@ -92,7 +92,7 @@ func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 322a6e28f06..1e76cde0780 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -81,7 +81,7 @@ func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -92,7 +92,7 @@ func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deploy } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -104,7 +104,7 @@ func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deploy // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
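Illustration, not part of the patch: how a status writer changes with the new UpdateStatus signature, assuming a clientset and an already-fetched Deployment supplied by the caller.

package example

import (
	"context"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func markObserved(ctx context.Context, cs kubernetes.Interface, d *appsv1beta1.Deployment) (*appsv1beta1.Deployment, error) {
	// UpdateStatus writes only the status subresource and now takes
	// metav1.UpdateOptions, mirroring Update.
	d.Status.ObservedGeneration = d.Generation
	return cs.AppsV1beta1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
}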
-func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index cb2114b6fc6..6c443aedf35 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -81,7 +81,7 @@ func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -92,7 +92,7 @@ func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.Stat } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -104,7 +104,7 @@ func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.Stat // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.Del } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 55cd0588a3e..2b941a5feec 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -38,15 +38,15 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(context.Context, *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Update(context.Context, *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - UpdateStatus(context.Context, *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) + Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) StatefulSetExpansion } @@ -110,11 +110,12 @@ func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Post(). Namespace(c.ns). 
Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.Stateful } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.Stateful // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *statefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e5473d356a7..f16e9883823 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -38,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
type ControllerRevisionInterface interface { - Create(context.Context, *v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Update(context.Context, *v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) ControllerRevisionExpansion } @@ -109,11 +109,12 @@ func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1 } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched controllerRevision. 
-func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index b0fc1408581..6cfedf19fb0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -38,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(context.Context, *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Update(context.Context, *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - UpdateStatus(context.Context, *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta2.DaemonSet, error) + Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) DaemonSetExpansion } @@ -110,11 +110,12 @@ func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). 
Into(result) @@ -122,12 +123,13 @@ func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet) ( } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet) ( // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *daemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 199b7e386b8..abb09d2ab2e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -38,15 +38,15 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. 
type DeploymentInterface interface { - Create(context.Context, *v1beta2.Deployment) (*v1beta2.Deployment, error) - Update(context.Context, *v1beta2.Deployment) (*v1beta2.Deployment, error) - UpdateStatus(context.Context, *v1beta2.Deployment) (*v1beta2.Deployment, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta2.Deployment, error) + Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) + Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) DeploymentExpansion } @@ -110,11 +110,12 @@ func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
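Illustration, not part of the patch: where the +genclient:noStatus tag referenced in the generated comment would sit on an API type so client-gen skips UpdateStatus; the Widget type and its package are hypothetical.

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WidgetSpec and WidgetStatus stand in for a real type's fields.
type WidgetSpec struct{}
type WidgetStatus struct{}

// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Widget carries a Status member, but the noStatus tag above keeps client-gen
// from emitting an UpdateStatus method for the generated client.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec,omitempty"`
	Status WidgetStatus `json:"status,omitempty"`
}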
- -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index a5997ff3757..a6d041f0f96 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -81,7 +81,7 @@ func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) @@ -92,7 +92,7 @@ func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched controllerRevision. 
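Illustration, not part of the patch: a test sketch confirming that the fake clients still record one action per call with the options argument in place; object names are made up.

package example

import (
	"context"
	"testing"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func TestPatchRecordsOneAction(t *testing.T) {
	existing := &appsv1beta2.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "web-1", Namespace: "default"},
	}
	client := fake.NewSimpleClientset(existing)

	patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
	if _, err := client.AppsV1beta2().ControllerRevisions("default").
		Patch(context.TODO(), "web-1", types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
		t.Fatal(err)
	}

	// Seed objects do not count as actions, so only the patch shows up here.
	actions := client.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "patch" {
		t.Fatalf("unexpected actions: %v", actions)
	}
}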
-func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index ef31c2ece7c..80b09794f9b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -81,7 +81,7 @@ func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -92,7 +92,7 @@ func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSe } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -104,7 +104,7 @@ func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSe // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index 53423db6690..26cc4a4b6ac 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -81,7 +81,7 @@ func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) @@ -92,7 +92,7 @@ func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deploy } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) @@ -104,7 +104,7 @@ func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deploy // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment) (*v1beta2.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index f1637e89b97..618c4ef74c8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -81,7 +81,7 @@ func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -92,7 +92,7 @@ func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.Replic } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -104,7 +104,7 @@ func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.Replic // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index e27dfbcf1f2..ad6b736d687 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -81,7 +81,7 @@ func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -92,7 +92,7 @@ func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.Stat } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -104,7 +104,7 @@ func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.Stat // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.Del } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) @@ -153,7 +153,7 @@ func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
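The fake clients gain the same parameters, so unit tests only need the extra arguments to keep compiling; in this patch the recorded testing actions do not yet carry the options. A test-side sketch with illustrative names:

package example

import (
	"context"
	"testing"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestCreateStatefulSet(t *testing.T) {
	cs := fake.NewSimpleClientset()
	sts := &appsv1beta2.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}
	// The fake Create requires ctx and CreateOptions just like the real client.
	if _, err := cs.AppsV1beta2().StatefulSets("default").Create(context.TODO(), sts, metav1.CreateOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}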
-func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { +func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index a95a19cccc6..84b7d16ae00 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -38,15 +38,15 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(context.Context, *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Update(context.Context, *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - UpdateStatus(context.Context, *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error) + Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) ReplicaSetExpansion } @@ -110,11 +110,12 @@ func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
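The added VersionedParams(&opts, scheme.ParameterCodec) call encodes the options struct into request query parameters, so fields such as dryRun and fieldManager reach the server without hand-written URL handling. A sketch (not generated code) of a server-side dry-run create:

package example

import (
	"context"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// dryRunCreate asks the server to validate and admit the object without
// persisting it; the options travel as ?dryRun=All&fieldManager=example-manager.
func dryRunCreate(ctx context.Context, cs kubernetes.Interface, rs *appsv1beta2.ReplicaSet) error {
	_, err := cs.AppsV1beta2().ReplicaSets(rs.Namespace).Create(ctx, rs, metav1.CreateOptions{
		DryRun:       []string{metav1.DryRunAll},
		FieldManager: "example-manager",
	})
	return err
}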
-func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *replicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 4e402232377..ed6393043f7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -38,17 +38,17 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. 
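Status writes follow the same pattern: UpdateStatus gains an UpdateOptions argument and still PUTs to the status subresource. A caller-side sketch, assuming an already-fetched ReplicaSet:

package example

import (
	"context"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markObserved writes only the status subresource of an existing ReplicaSet.
func markObserved(ctx context.Context, cs kubernetes.Interface, rs *appsv1beta2.ReplicaSet) error {
	rs.Status.ObservedGeneration = rs.Generation
	_, err := cs.AppsV1beta2().ReplicaSets(rs.Namespace).UpdateStatus(ctx, rs, metav1.UpdateOptions{})
	return err
}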
type StatefulSetInterface interface { - Create(context.Context, *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Update(context.Context, *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - UpdateStatus(context.Context, *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta2.StatefulSet, error) + Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) - UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) StatefulSetExpansion } @@ -113,11 +113,12 @@ func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Post(). Namespace(c.ns). Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -125,12 +126,13 @@ func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.Stateful } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). 
Into(result) @@ -139,14 +141,14 @@ func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.Stateful // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). Do(ctx). Into(result) @@ -181,13 +183,14 @@ func (c *statefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -209,13 +212,14 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go index 1ddbc702e08..68446e2176d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -38,14 +38,14 @@ type AuditSinksGetter interface { // AuditSinkInterface has methods to work with AuditSink resources. 
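The scale subresource gets the same treatment: UpdateScale now takes UpdateOptions and forwards it via VersionedParams. A sketch of resizing a StatefulSet through its scale subresource:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// resize reads the current scale, sets the desired replica count, and writes it back.
func resize(ctx context.Context, cs kubernetes.Interface, namespace, name string, replicas int32) error {
	scale, err := cs.AppsV1beta2().StatefulSets(namespace).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = cs.AppsV1beta2().StatefulSets(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}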
type AuditSinkInterface interface { - Create(context.Context, *v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Update(context.Context, *v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) + Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (*v1alpha1.AuditSink, error) + Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (*v1alpha1.AuditSink, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AuditSink, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) AuditSinkExpansion } @@ -104,10 +104,11 @@ func (c *auditSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Post(). Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). Body(auditSink). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *auditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink) } // Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Put(). Resource("auditsinks"). Name(auditSink.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(auditSink). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *auditSinks) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched auditSink. -func (c *auditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Patch(pt). Resource("auditsinks"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go b/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go index 2db9a387217..bd0c95be21f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go @@ -77,7 +77,7 @@ func (c *FakeAuditSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *FakeAuditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeAuditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSi } // Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *FakeAuditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeAuditSinks) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched auditSink. -func (c *FakeAuditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(auditsinksResource, name, pt, data, subresources...), &v1alpha1.AuditSink{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD index 7ddf71975c5..9acc4a0a335 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD @@ -18,6 +18,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD index dfa5cfa9e98..429e39473fd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD @@ -17,6 +17,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index d3cf33d547b..b85fcfbb87d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -22,6 +22,7 @@ import ( "context" v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8 var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1", Kind: "TokenReview"} // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview) (result *v1.TokenReview, err error) { +func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index 5f5a91fc9f1..ca7cd47d26b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -22,6 +22,8 @@ import ( "context" v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { - Create(context.Context, *v1.TokenReview) (*v1.TokenReview, error) + Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error) TokenReviewExpansion } @@ -50,10 +52,11 @@ func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { } // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview) (result *v1.TokenReview, err error) { +func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { result = &v1.TokenReview{} err = c.client.Post(). Resource("tokenreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(tokenReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD index 6e4ee1bdd4d..174bc453257 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD @@ -18,6 +18,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", deps = [ "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD index 4b7ee4d1d38..103e38e2056 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD @@ -17,6 +17,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", deps = [ "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go 
index 83aff9e18b7..0da3ec6f437 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -22,6 +22,7 @@ import ( "context" v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8 var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1beta1", Kind: "TokenReview"} // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview) (result *v1beta1.TokenReview, err error) { +func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index c54d59ff840..5da12243373 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -22,6 +22,8 @@ import ( "context" v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { - Create(context.Context, *v1beta1.TokenReview) (*v1beta1.TokenReview, error) + Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error) TokenReviewExpansion } @@ -50,10 +52,11 @@ func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { } // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview) (result *v1beta1.TokenReview, err error) { +func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { result = &v1beta1.TokenReview{} err = c.client.Post(). Resource("tokenreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(tokenReview). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD index fcc864f2241..f6a071181d0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD @@ -24,6 +24,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1", deps = [ "//staging/src/k8s.io/api/authorization/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD index 0f51b7fa6a7..969cea275cc 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD @@ -23,6 +23,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", deps = [ "//staging/src/k8s.io/api/authorization/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index 4bf694956cb..d74ae0a474d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -37,7 +38,7 @@ var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "auth var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "LocalSubjectAccessReview"} // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview) (result *v1.LocalSubjectAccessReview, err error) { +func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index ee02428dead..80ebbbd45f3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "autho var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectAccessReview"} // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview) (result *v1.SelfSubjectAccessReview, err error) { +func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index 7a93225e948..dd70908ad31 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -22,6 +22,7 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "author var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectRulesReview"} // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview) (result *v1.SelfSubjectRulesReview, err error) { +func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 058f1923979..b480b2b4187 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authoriza var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SubjectAccessReview"} // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview) (result *v1.SubjectAccessReview, err error) { +func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index b2553069190..84b2efe166f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { - Create(context.Context, *v1.LocalSubjectAccessReview) (*v1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } @@ -52,11 +54,12 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *l } // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview) (result *v1.LocalSubjectAccessReview, err error) { +func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { result = &v1.LocalSubjectAccessReview{} err = c.client.Post(). Namespace(c.ns). 
Resource("localsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(localSubjectAccessReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index f5c3f4156e7..2006196c11c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { - Create(context.Context, *v1.SelfSubjectAccessReview) (*v1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } @@ -50,10 +52,11 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessRev } // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview) (result *v1.SelfSubjectAccessReview, err error) { +func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { result = &v1.SelfSubjectAccessReview{} err = c.client.Post(). Resource("selfsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(selfSubjectAccessReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 5b02fcd9379..25d99f7b525 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -22,6 +22,8 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. type SelfSubjectRulesReviewInterface interface { - Create(context.Context, *v1.SelfSubjectRulesReview) (*v1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } @@ -50,10 +52,11 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesRevie } // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. 
-func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview) (result *v1.SelfSubjectRulesReview, err error) { +func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { result = &v1.SelfSubjectRulesReview{} err = c.client.Post(). Resource("selfsubjectrulesreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(selfSubjectRulesReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index aaeb7a1207c..8ac0566a2e6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. type SubjectAccessReviewInterface interface { - Create(context.Context, *v1.SubjectAccessReview) (*v1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error) SubjectAccessReviewExpansion } @@ -50,10 +52,11 @@ func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { } // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview) (result *v1.SubjectAccessReview, err error) { +func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { result = &v1.SubjectAccessReview{} err = c.client.Post(). Resource("subjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(subjectAccessReview). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD index c830646718f..29a7979f604 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD @@ -24,6 +24,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", deps = [ "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD index 00d72bc3a5c..fc61d77816a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD @@ -24,6 +24,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", deps = [ "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index e8471a579c0..2d3ba446283 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -37,7 +38,7 @@ var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "auth var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "LocalSubjectAccessReview"} // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview) (result *v1beta1.LocalSubjectAccessReview, err error) { +func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index 41f321cceb6..febe90c77a0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "autho var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectAccessReview"} // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview) (result *v1beta1.SelfSubjectAccessReview, err error) { +func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 61ad5c8ced3..02df06012a7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -22,6 +22,7 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "author var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectRulesReview"} // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview) (result *v1beta1.SelfSubjectRulesReview, err error) { +func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index 0fcd59c5468..b5be913c4bb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -22,6 +22,7 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -36,7 +37,7 @@ var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authoriza var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SubjectAccessReview"} // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview) (result *v1beta1.SubjectAccessReview, err error) { +func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index d51a03aabb1..78584ba9458 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { - Create(context.Context, *v1beta1.LocalSubjectAccessReview) (*v1beta1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } @@ -52,11 +54,12 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace strin } // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. 
-func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview) (result *v1beta1.LocalSubjectAccessReview, err error) { +func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { result = &v1beta1.LocalSubjectAccessReview{} err = c.client.Post(). Namespace(c.ns). Resource("localsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(localSubjectAccessReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index bf9fe0b1843..0286c93fe6a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { - Create(context.Context, *v1beta1.SelfSubjectAccessReview) (*v1beta1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } @@ -50,10 +52,11 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAcce } // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview) (result *v1beta1.SelfSubjectAccessReview, err error) { +func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { result = &v1beta1.SelfSubjectAccessReview{} err = c.client.Post(). Resource("selfsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(selfSubjectAccessReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index 874e9db91f4..d772973ec6e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -22,6 +22,8 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
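The authorization review clients above (SubjectAccessReview, LocalSubjectAccessReview, SelfSubjectAccessReview, SelfSubjectRulesReview) are create-only, so the only change they pick up is the new metav1.CreateOptions parameter, which the generated request builder now encodes with VersionedParams(&opts, scheme.ParameterCodec). A minimal caller-side sketch of the new signature follows; the clientset variable, user name, and reviewed attributes are illustrative, not part of this patch.

package example

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkAccess asks the API server whether user "jane" can get pods in "default".
func checkAccess(ctx context.Context, client kubernetes.Interface) (bool, error) {
	sar := &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "jane",
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "get",
				Resource:  "pods",
			},
		},
	}
	// The CreateOptions argument is new in this patch; the zero value adds no query parameters.
	result, err := client.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return result.Status.Allowed, nil
}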
type SelfSubjectRulesReviewInterface interface { - Create(context.Context, *v1beta1.SelfSubjectRulesReview) (*v1beta1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } @@ -50,10 +52,11 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRules } // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview) (result *v1beta1.SelfSubjectRulesReview, err error) { +func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { result = &v1beta1.SelfSubjectRulesReview{} err = c.client.Post(). Resource("selfsubjectrulesreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(selfSubjectRulesReview). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index 3445f5a9180..aebe8398c0f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -22,6 +22,8 @@ import ( "context" v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,7 +35,7 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. type SubjectAccessReviewInterface interface { - Create(context.Context, *v1beta1.SubjectAccessReview) (*v1beta1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error) SubjectAccessReviewExpansion } @@ -50,10 +52,11 @@ func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReview } // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview) (result *v1beta1.SubjectAccessReview, err error) { +func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { result = &v1beta1.SubjectAccessReview{} err = c.client.Post(). Resource("subjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). Body(subjectAccessReview). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 1c42305dd3d..22fe6d45739 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -81,7 +81,7 @@ func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOp } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -92,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPod } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -104,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPod // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (*autoscalingv1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt } // Patch applies the patch and returns the patched horizontalPodAutoscaler. 
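In the fake clients, the hunks above only widen the method signatures; the recorded testing actions are built exactly as before (NewCreateAction, NewUpdateAction, and friends never see the options), so existing reactors and assertions keep working once callers pass the extra argument. A rough test-side sketch against the fake clientset, with all object names invented for illustration:

package example

import (
	"context"
	"testing"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestCreateHPAWithFakeClient(t *testing.T) {
	client := fake.NewSimpleClientset()

	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{Kind: "Deployment", Name: "web"},
			MaxReplicas:    5,
		},
	}

	// The CreateOptions argument is required by the new signature but is not
	// recorded in the fake's action list.
	if _, err := client.AutoscalingV1().HorizontalPodAutoscalers("default").
		Create(context.TODO(), hpa, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	if len(client.Actions()) != 1 {
		t.Fatalf("expected exactly one recorded action, got %d", len(client.Actions()))
	}
}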
-func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 840b1aafc4d..1228ce73135 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -38,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(context.Context, *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Update(context.Context, *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - UpdateStatus(context.Context, *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -110,11 +110,12 @@ func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOp } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
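The interface rewrite above also names every parameter and threads an options struct through each write verb; in particular, Patch now takes a metav1.PatchOptions value between the patch body and the variadic subresources. A short sketch of the new Patch call shape, assuming an already-configured clientset and an existing HPA name:

package example

import (
	"context"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// bumpMaxReplicas sends a JSON merge patch using the new Patch signature;
// the PatchOptions value sits between the patch bytes and the optional subresources.
func bumpMaxReplicas(ctx context.Context, client kubernetes.Interface, ns, name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
	patch := []byte(`{"spec":{"maxReplicas":10}}`)
	return client.AutoscalingV1().HorizontalPodAutoscalers(ns).
		Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
}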
-func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAuto } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAuto // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 78c381b4d8c..de28395fa67 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -81,7 +81,7 @@ func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOp } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -92,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPod } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -104,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPod // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 071d49b761b..a5b564d042a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -38,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(context.Context, *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Update(context.Context, *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - UpdateStatus(context.Context, *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -110,11 +110,12 @@ func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOption } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). 
Into(result) @@ -122,12 +123,13 @@ func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAuto } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAuto // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index 6ece0b0e0c2..18ea026c164 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -81,7 +81,7 @@ func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOp } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
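Because the non-fake Update and UpdateStatus bodies above now call VersionedParams, fields of metav1.UpdateOptions such as DryRun and FieldManager travel as query parameters on the PUT (roughly ?dryRun=All&fieldManager=example-controller), which previously could not be set per-request through the typed client. A hedged sketch against the v2beta1 client shown above; the field manager name is a placeholder:

package example

import (
	"context"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// dryRunUpdate exercises the new UpdateOptions plumbing: the server validates and
// admits the update but does not persist it when DryRun is set to "All".
func dryRunUpdate(ctx context.Context, client kubernetes.Interface, hpa *autoscalingv2beta1.HorizontalPodAutoscaler) error {
	_, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(hpa.Namespace).Update(ctx, hpa, metav1.UpdateOptions{
		DryRun:       []string{metav1.DryRunAll},
		FieldManager: "example-controller",
	})
	return err
}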
-func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -92,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPod } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -104,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPod // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 2ac92e7d6a3..168137d72bb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -38,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - Create(context.Context, *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Update(context.Context, *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - UpdateStatus(context.Context, *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -110,11 +110,12 @@ func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOption } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAuto } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). 
Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAuto // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index 4b06092d51b..b09872952c8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -81,7 +81,7 @@ func (c *FakeJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Create(ctx context.Context, job *batchv1.Job) (result *batchv1.Job, err error) { +func (c *FakeJobs) Create(ctx context.Context, job *batchv1.Job, opts v1.CreateOptions) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &batchv1.Job{}) @@ -92,7 +92,7 @@ func (c *FakeJobs) Create(ctx context.Context, job *batchv1.Job) (result *batchv } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Update(ctx context.Context, job *batchv1.Job) (result *batchv1.Job, err error) { +func (c *FakeJobs) Update(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (result *batchv1.Job, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &batchv1.Job{}) @@ -104,7 +104,7 @@ func (c *FakeJobs) Update(ctx context.Context, job *batchv1.Job) (result *batchv // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeJobs) UpdateStatus(ctx context.Context, job *batchv1.Job) (*batchv1.Job, error) { +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (*batchv1.Job, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batchv1.Job{}) @@ -131,7 +131,7 @@ func (c *FakeJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptio } // Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *batchv1.Job, err error) { +func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &batchv1.Job{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index d50f680b365..1ad12320cbd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -38,15 +38,15 @@ type JobsGetter interface { // JobInterface has methods to work with Job resources. type JobInterface interface { - Create(context.Context, *v1.Job) (*v1.Job, error) - Update(context.Context, *v1.Job) (*v1.Job, error) - UpdateStatus(context.Context, *v1.Job) (*v1.Job, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Job, error) + Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) + Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) JobExpansion } @@ -110,11 +110,12 @@ func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interf } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. 
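For existing callers, the migration implied by the batch/v1 interface change above is mechanical: every write call gains a zero-value options argument, while Get, List, and Watch keep their current options. A small sketch of the before/after shape for the Job client, with variable names chosen for illustration:

package example

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateJob shows the caller-side change for the batch/v1 client:
//
//	old: client.BatchV1().Jobs(ns).Update(ctx, job)
//	new: client.BatchV1().Jobs(ns).Update(ctx, job, metav1.UpdateOptions{})
func updateJob(ctx context.Context, client kubernetes.Interface, job *batchv1.Job) (*batchv1.Job, error) {
	return client.BatchV1().Jobs(job.Namespace).Update(ctx, job, metav1.UpdateOptions{})
}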
-func (c *jobs) Create(ctx context.Context, job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Post(). Namespace(c.ns). Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *jobs) Create(ctx context.Context, job *v1.Job) (result *v1.Job, err err } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(ctx context.Context, job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). Name(job.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *jobs) Update(ctx context.Context, job *v1.Job) (result *v1.Job, err err // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). Name(job.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *jobs) DeleteCollection(ctx context.Context, options *metav1.DeleteOptio } // Patch applies the patch and returns the patched job. -func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { +func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Patch(pt). Namespace(c.ns). Resource("jobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index 4fa41a571fb..743360e9934 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -38,15 +38,15 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. 
type CronJobInterface interface { - Create(context.Context, *v1beta1.CronJob) (*v1beta1.CronJob, error) - Update(context.Context, *v1beta1.CronJob) (*v1beta1.CronJob, error) - UpdateStatus(context.Context, *v1beta1.CronJob) (*v1beta1.CronJob, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.CronJob, error) + Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) + Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) CronJobExpansion } @@ -110,11 +110,12 @@ func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Post(). Namespace(c.ns). Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob) (result } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob) (result // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). 
Name(cronJob.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *cronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptio } // Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cronjobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 411c4d8600e..3cc605dff5b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -81,7 +81,7 @@ func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) @@ -92,7 +92,7 @@ func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob) (re } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) @@ -104,7 +104,7 @@ func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob) (re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob) (*v1beta1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched cronJob. 
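Besides adding PatchOptions, the generated Patch bodies above now set Name before SubResource and append VersionedParams; the request still targets the usual path (the main resource when no subresources are passed, or e.g. the status subresource when "status" is appended). A hedged sketch suspending a CronJob through the v1beta1 client shown above; the namespace and name are placeholders:

package example

import (
	"context"

	batchv1beta1 "k8s.io/api/batch/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// suspendCronJob uses the new Patch signature; passing no subresources patches the
// main resource, while appending "status" would target the status subresource.
func suspendCronJob(ctx context.Context, client kubernetes.Interface, ns, name string) (*batchv1beta1.CronJob, error) {
	patch := []byte(`{"spec":{"suspend":true}}`)
	return client.BatchV1beta1().CronJobs(ns).
		Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
}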
-func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index d8bf0b500b7..ea3cfefc93b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -38,15 +38,15 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(context.Context, *v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Update(context.Context, *v2alpha1.CronJob) (*v2alpha1.CronJob, error) - UpdateStatus(context.Context, *v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v2alpha1.CronJob, error) + Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (*v2alpha1.CronJob, error) + Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CronJob, error) List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CronJobList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) CronJobExpansion } @@ -110,11 +110,12 @@ func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Post(). Namespace(c.ns). Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *cronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob) (resul } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. 
-func (c *cronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *cronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob) (resul // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *cronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptio } // Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cronjobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 2987aa2c7a7..f004d6793aa 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -81,7 +81,7 @@ func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) @@ -92,7 +92,7 @@ func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob) (r } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) @@ -104,7 +104,7 @@ func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob) (r // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{}) @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CronJob{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 8582e07763c..a53b7e7477e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -38,15 +38,15 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
type CertificateSigningRequestInterface interface { - Create(context.Context, *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Update(context.Context, *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - UpdateStatus(context.Context, *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } @@ -105,10 +105,11 @@ func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOpti } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Post(). Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *certificateSigningRequests) Create(ctx context.Context, certificateSign } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
-func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *certificateSigningRequests) Update(ctx context.Context, certificateSign // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, optio } // Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Patch(pt). Resource("certificatesigningrequests"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index d811dd25210..53b73318096 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -77,7 +77,7 @@ func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.List } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
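The certificates client is cluster-scoped, and UpdateStatus now threads UpdateOptions through to the /status subresource. A minimal sketch, with the CSR name supplied by the caller, that uses DryRun to exercise the new signature without persisting anything:

package demo // hypothetical package name

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// dryRunStatusUpdate re-submits a CertificateSigningRequest's current status
// as a dry run, purely to exercise the new UpdateStatus signature.
func dryRunStatusUpdate(ctx context.Context, client kubernetes.Interface, name string) error {
	csrs := client.CertificatesV1beta1().CertificateSigningRequests()

	csr, err := csrs.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// UpdateOptions are encoded as query parameters by VersionedParams,
	// so this PUT to .../status carries ?dryRun=All and persists nothing.
	_, err = csrs.UpdateStatus(ctx, csr, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	return err
}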
-func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificate } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificate // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o } // Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index 74dffb17b05..ba65f6205e3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -81,7 +81,7 @@ func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *coordinationv1.Lease) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Create(ctx context.Context, lease *coordinationv1.Lease, opts v1.CreateOptions) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) @@ -92,7 +92,7 @@ func (c *FakeLeases) Create(ctx context.Context, lease *coordinationv1.Lease) (r } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *coordinationv1.Lease) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Update(ctx context.Context, lease *coordinationv1.Lease, opts v1.UpdateOptions) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) @@ -119,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &coordinationv1.Lease{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index ea006236c00..5077779b0be 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -38,14 +38,14 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. 
type LeaseInterface interface { - Create(context.Context, *v1.Lease) (*v1.Lease, error) - Update(context.Context, *v1.Lease) (*v1.Lease, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Lease, error) + Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error) + Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) LeaseExpansion } @@ -109,11 +109,12 @@ func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Inte } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1.Lease) (result *v1.Lease, err error) { +func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Post(). Namespace(c.ns). Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *leases) Create(ctx context.Context, lease *v1.Lease) (result *v1.Lease, } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1.Lease) (result *v1.Lease, err error) { +func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Put(). Namespace(c.ns). Resource("leases"). Name(lease.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *leases) DeleteCollection(ctx context.Context, options *metav1.DeleteOpt } // Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { +func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Patch(pt). Namespace(c.ns). Resource("leases"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index c3ebc52d0ac..53fc60bafff 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -81,7 +81,7 @@ func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) @@ -92,7 +92,7 @@ func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease) (result * } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) @@ -119,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index d8db71e3e97..ca65b57fa73 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -38,14 +38,14 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. 
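On the fake side the signatures change in lockstep, while the generated actions still ignore the options (the Invokes calls above are untouched). A sketch of a unit test against the fake clientset, with hypothetical object names:

package demo // hypothetical package name

import (
	"context"
	"testing"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestLeaseRoundTrip(t *testing.T) {
	client := fake.NewSimpleClientset()
	ctx := context.Background()

	lease := &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}

	// The fakes accept the same options arguments as the real clients.
	if _, err := client.CoordinationV1().Leases("default").Create(ctx, lease, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	got, err := client.CoordinationV1().Leases("default").Get(ctx, "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "demo" {
		t.Fatalf("unexpected lease: %+v", got)
	}
}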
type LeaseInterface interface { - Create(context.Context, *v1beta1.Lease) (*v1beta1.Lease, error) - Update(context.Context, *v1beta1.Lease) (*v1beta1.Lease, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Lease, error) + Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error) + Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) LeaseExpansion } @@ -109,11 +109,12 @@ func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interfac } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Post(). Namespace(c.ns). Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease) (result *v1be } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Put(). Namespace(c.ns). Resource("leases"). Name(lease.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *leases) DeleteCollection(ctx context.Context, options *v1.DeleteOptions } // Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { +func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Patch(pt). Namespace(c.ns). Resource("leases"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 54ddedee249..230ea29c5f5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -38,14 +38,14 @@ type ComponentStatusesGetter interface { // ComponentStatusInterface has methods to work with ComponentStatus resources. type ComponentStatusInterface interface { - Create(context.Context, *v1.ComponentStatus) (*v1.ComponentStatus, error) - Update(context.Context, *v1.ComponentStatus) (*v1.ComponentStatus, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ComponentStatus, error) + Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error) + Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) ComponentStatusExpansion } @@ -104,10 +104,11 @@ func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Post(). Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(componentStatus). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.Comp } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Put(). Resource("componentstatuses"). Name(componentStatus.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(componentStatus). Do(ctx). 
Into(result) @@ -152,12 +154,13 @@ func (c *componentStatuses) DeleteCollection(ctx context.Context, options *metav } // Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Patch(pt). Resource("componentstatuses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index dc5947cb053..c4f70908135 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -38,14 +38,14 @@ type ConfigMapsGetter interface { // ConfigMapInterface has methods to work with ConfigMap resources. type ConfigMapInterface interface { - Create(context.Context, *v1.ConfigMap) (*v1.ConfigMap, error) - Update(context.Context, *v1.ConfigMap) (*v1.ConfigMap, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) + Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) + Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) ConfigMapExpansion } @@ -109,11 +109,12 @@ func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch. } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Post(). Namespace(c.ns). Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). Body(configMap). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap) (resul } // Update takes the representation of a configMap and updates it. 
Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Put(). Namespace(c.ns). Resource("configmaps"). Name(configMap.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(configMap). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *configMaps) DeleteCollection(ctx context.Context, options *metav1.Delet } // Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { +func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Patch(pt). Namespace(c.ns). Resource("configmaps"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 325489d23ab..925b9e94cb6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -38,14 +38,14 @@ type EndpointsGetter interface { // EndpointsInterface has methods to work with Endpoints resources. type EndpointsInterface interface { - Create(context.Context, *v1.Endpoints) (*v1.Endpoints, error) - Update(context.Context, *v1.Endpoints) (*v1.Endpoints, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Endpoints, error) + Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error) + Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) EndpointsExpansion } @@ -109,11 +109,12 @@ func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. 
-func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Post(). Namespace(c.ns). Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpoints). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints) (result } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Put(). Namespace(c.ns). Resource("endpoints"). Name(endpoints.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpoints). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *endpoints) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { +func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpoints"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go index a2aa7236bc8..1073effcc6d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -38,14 +38,14 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. 
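Patch now takes PatchOptions ahead of the variadic subresources, and the request builder orders Name before SubResource. A sketch of a call site, using a placeholder label patch:

package demo // hypothetical package name

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// labelConfigMap adds a label via a strategic merge patch; the label key and
// value are placeholders.
func labelConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	patch := []byte(`{"metadata":{"labels":{"reviewed":"true"}}}`)
	// PatchOptions is the new argument and sits before the variadic
	// subresources; it is turned into query parameters by VersionedParams.
	_, err := client.CoreV1().ConfigMaps(namespace).Patch(ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}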
type EventInterface interface { - Create(context.Context, *v1.Event) (*v1.Event, error) - Update(context.Context, *v1.Event) (*v1.Event, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Event, error) + Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) + Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) EventExpansion } @@ -109,11 +109,12 @@ func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Inte } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event) (result *v1.Event, err error) { +func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Post(). Namespace(c.ns). Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *events) Create(ctx context.Context, event *v1.Event) (result *v1.Event, } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event) (result *v1.Event, err error) { +func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Put(). Namespace(c.ns). Resource("events"). Name(event.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *events) DeleteCollection(ctx context.Context, options *metav1.DeleteOpt } // Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { +func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Patch(pt). Namespace(c.ns). Resource("events"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 87a9dffe904..967b9f7c6a2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -77,7 +77,7 @@ func (c *FakeComponentStatuses) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.CreateOptions) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *cor } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.UpdateOptions) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, options *v } // Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &corev1.ComponentStatus{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index e5cf689f9f5..299306064d8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -81,7 +81,7 @@ func (c *FakeConfigMaps) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Create(ctx context.Context, configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Create(ctx context.Context, configMap *corev1.ConfigMap, opts v1.CreateOptions) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) @@ -92,7 +92,7 @@ func (c *FakeConfigMaps) Create(ctx context.Context, configMap *corev1.ConfigMap } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Update(ctx context.Context, configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Update(ctx context.Context, configMap *corev1.ConfigMap, opts v1.UpdateOptions) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) @@ -119,7 +119,7 @@ func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &corev1.ConfigMap{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 2a7888afd9e..4c898728b86 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -81,7 +81,7 @@ func (c *FakeEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Create(ctx context.Context, endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Create(ctx context.Context, endpoints *corev1.Endpoints, opts v1.CreateOptions) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) @@ -92,7 +92,7 @@ func (c *FakeEndpoints) Create(ctx context.Context, endpoints *corev1.Endpoints) } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Update(ctx context.Context, endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Update(ctx context.Context, endpoints *corev1.Endpoints, opts v1.UpdateOptions) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) @@ -119,7 +119,7 @@ func (c *FakeEndpoints) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &corev1.Endpoints{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index e5d1d5e2086..ca531f5c889 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -81,7 +81,7 @@ func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *corev1.Event) (result *corev1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *corev1.Event, opts v1.CreateOptions) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &corev1.Event{}) @@ -92,7 +92,7 @@ func (c *FakeEvents) Create(ctx context.Context, event *corev1.Event) (result *c } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *corev1.Event) (result *corev1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *corev1.Event, opts v1.UpdateOptions) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &corev1.Event{}) @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &corev1.Event{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 53e3b79ea64..52e473b1eb7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -81,7 +81,7 @@ func (c *FakeLimitRanges) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *corev1.LimitRange, opts v1.CreateOptions) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) @@ -92,7 +92,7 @@ func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *corev1.LimitRa } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. 
-func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *corev1.LimitRange, opts v1.UpdateOptions) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) @@ -119,7 +119,7 @@ func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched limitRange. -func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &corev1.LimitRange{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index b4ee29d29b1..12b68516870 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -77,7 +77,7 @@ func (c *FakeNamespaces) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Create(ctx context.Context, namespace *corev1.Namespace) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Create(ctx context.Context, namespace *corev1.Namespace, opts v1.CreateOptions) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &corev1.Namespace{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeNamespaces) Create(ctx context.Context, namespace *corev1.Namespace } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Update(ctx context.Context, namespace *corev1.Namespace) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Update(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &corev1.Namespace{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeNamespaces) Update(ctx context.Context, namespace *corev1.Namespace // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (*corev1.Namespace, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &corev1.Namespace{}) if obj == nil { @@ -115,7 +115,7 @@ func (c *FakeNamespaces) Delete(ctx context.Context, name string, options *v1.De } // Patch applies the patch and returns the patched namespace. 
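Because the fakes still funnel UpdateStatus through a root update action on the "status" subresource, tests can keep asserting on recorded actions after migrating to the new signatures. A sketch, with hypothetical names:

package demo // hypothetical package name

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestNamespaceStatusUpdateIsRecorded(t *testing.T) {
	client := fake.NewSimpleClientset(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	})
	ctx := context.Background()

	ns, err := client.CoreV1().Namespaces().Get(ctx, "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	// UpdateStatus now requires UpdateOptions; the fake maps it to an
	// "update" action on the "status" subresource.
	if _, err := client.CoreV1().Namespaces().UpdateStatus(ctx, ns, metav1.UpdateOptions{}); err != nil {
		t.Fatal(err)
	}

	actions := client.Actions()
	last := actions[len(actions)-1]
	if last.GetVerb() != "update" || last.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", last)
	}
}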
-func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &corev1.Namespace{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 524c5766bd1..0d9f566b12a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -77,7 +77,7 @@ func (c *FakeNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Create(ctx context.Context, node *corev1.Node) (result *corev1.Node, err error) { +func (c *FakeNodes) Create(ctx context.Context, node *corev1.Node, opts v1.CreateOptions) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(nodesResource, node), &corev1.Node{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeNodes) Create(ctx context.Context, node *corev1.Node) (result *core } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Update(ctx context.Context, node *corev1.Node) (result *corev1.Node, err error) { +func (c *FakeNodes) Update(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(nodesResource, node), &corev1.Node{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeNodes) Update(ctx context.Context, node *corev1.Node) (result *core // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *corev1.Node) (*corev1.Node, error) { +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (*corev1.Node, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &corev1.Node{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeNodes) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched node. -func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Node, err error) { +func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Node, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &corev1.Node{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index fb138b63bdf..f5c9f158345 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -77,7 +77,7 @@ func (c *FakePersistentVolumes) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.CreateOptions) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *co } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *co // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume) (*corev1.PersistentVolume, error) { +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (*corev1.PersistentVolume, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, options *v } // Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &corev1.PersistentVolume{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index 22050361b5c..de0dd1c1a5d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -81,7 +81,7 @@ func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts v1.ListOpti } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.CreateOptions) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -92,7 +92,7 @@ func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolum } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -104,7 +104,7 @@ func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolum // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) { +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -131,7 +131,7 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, optio } // Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &corev1.PersistentVolumeClaim{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 6a737e477a1..e408fd0af5c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -81,7 +81,7 @@ func (c *FakePods) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Create(ctx context.Context, pod *corev1.Pod) (result *corev1.Pod, err error) { +func (c *FakePods) Create(ctx context.Context, pod *corev1.Pod, opts v1.CreateOptions) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &corev1.Pod{}) @@ -92,7 +92,7 @@ func (c *FakePods) Create(ctx context.Context, pod *corev1.Pod) (result *corev1. } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Update(ctx context.Context, pod *corev1.Pod) (result *corev1.Pod, err error) { +func (c *FakePods) Update(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &corev1.Pod{}) @@ -104,7 +104,7 @@ func (c *FakePods) Update(ctx context.Context, pod *corev1.Pod) (result *corev1. // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) { +func (c *FakePods) UpdateStatus(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (*corev1.Pod, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &corev1.Pod{}) @@ -131,7 +131,7 @@ func (c *FakePods) DeleteCollection(ctx context.Context, options *v1.DeleteOptio } // Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) { +func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &corev1.Pod{}) @@ -153,7 +153,7 @@ func (c *FakePods) GetEphemeralContainers(ctx context.Context, podName string, o } // UpdateEphemeralContainers takes the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. -func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *corev1.EphemeralContainers) (result *corev1.EphemeralContainers, err error) { +func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *corev1.EphemeralContainers, opts v1.UpdateOptions) (result *corev1.EphemeralContainers, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, ephemeralContainers), &corev1.EphemeralContainers{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 57e45b75494..905cafeae4b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -81,7 +81,7 @@ func (c *FakePodTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.CreateOptions) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) @@ -92,7 +92,7 @@ func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *corev1.PodTe } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.UpdateOptions) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) @@ -119,7 +119,7 @@ func (c *FakePodTemplates) DeleteCollection(ctx context.Context, options *v1.Del } // Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &corev1.PodTemplate{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 0a6c8b6f6fe..12d5d9423a8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -82,7 +82,7 @@ func (c *FakeReplicationControllers) Watch(ctx context.Context, opts v1.ListOpti } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. 
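One behavioral detail that follows from the hunks above: the fakes accept the new opts arguments but do not thread them into the recorded actions (the Invokes calls are unchanged), so options such as DryRun appear to have no effect on the fake object tracker. A sketch of a test written under that assumption, with illustrative object names:

package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeCreateWithDryRun(t *testing.T) {
	client := fake.NewSimpleClientset()

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// DryRun is accepted but, per the generated fakes above, not forwarded to
	// the create action, so the object is still stored by the tracker.
	if _, err := client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
		t.Fatalf("create: %v", err)
	}

	if _, err := client.CoreV1().Pods("default").Get(context.TODO(), "demo", metav1.GetOptions{}); err != nil {
		t.Fatalf("expected the fake to track the pod, got: %v", err)
	}

	// The fake records both calls as actions.
	if got := len(client.Actions()); got != 2 {
		t.Fatalf("expected 2 recorded actions, got %d", got)
	}
}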
-func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.CreateOptions) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) @@ -93,7 +93,7 @@ func (c *FakeReplicationControllers) Create(ctx context.Context, replicationCont } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) @@ -105,7 +105,7 @@ func (c *FakeReplicationControllers) Update(ctx context.Context, replicationCont // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController) (*corev1.ReplicationController, error) { +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (*corev1.ReplicationController, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &corev1.ReplicationController{}) @@ -132,7 +132,7 @@ func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, optio } // Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &corev1.ReplicationController{}) @@ -154,7 +154,7 @@ func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationCo } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 83ff3258961..c867270c0b5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -81,7 +81,7 @@ func (c *FakeResourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.CreateOptions) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -92,7 +92,7 @@ func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *corev1.R } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -104,7 +104,7 @@ func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *corev1.R // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota) (*corev1.ResourceQuota, error) { +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (*corev1.ResourceQuota, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -131,7 +131,7 @@ func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &corev1.ResourceQuota{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index b45c05b4a0a..38703f14682 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -81,7 +81,7 @@ func (c *FakeSecrets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Create(ctx context.Context, secret *corev1.Secret) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Create(ctx context.Context, secret *corev1.Secret, opts v1.CreateOptions) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &corev1.Secret{}) @@ -92,7 +92,7 @@ func (c *FakeSecrets) Create(ctx context.Context, secret *corev1.Secret) (result } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Update(ctx context.Context, secret *corev1.Secret) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Update(ctx context.Context, secret *corev1.Secret, opts v1.UpdateOptions) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &corev1.Secret{}) @@ -119,7 +119,7 @@ func (c *FakeSecrets) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &corev1.Secret{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 138626e2d6b..dd47bee64e0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -81,7 +81,7 @@ func (c *FakeServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Create(ctx context.Context, service *corev1.Service) (result *corev1.Service, err error) { +func (c *FakeServices) Create(ctx context.Context, service *corev1.Service, opts v1.CreateOptions) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &corev1.Service{}) @@ -92,7 +92,7 @@ func (c *FakeServices) Create(ctx context.Context, service *corev1.Service) (res } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. 
-func (c *FakeServices) Update(ctx context.Context, service *corev1.Service) (result *corev1.Service, err error) { +func (c *FakeServices) Update(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &corev1.Service{}) @@ -104,7 +104,7 @@ func (c *FakeServices) Update(ctx context.Context, service *corev1.Service) (res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *corev1.Service) (*corev1.Service, error) { +func (c *FakeServices) UpdateStatus(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (*corev1.Service, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &corev1.Service{}) @@ -123,7 +123,7 @@ func (c *FakeServices) Delete(ctx context.Context, name string, options *v1.Dele } // Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Service, err error) { +func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &corev1.Service{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index cc48d6148f0..78556d147d0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -82,7 +82,7 @@ func (c *FakeServiceAccounts) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.CreateOptions) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) @@ -93,7 +93,7 @@ func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *corev1 } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.UpdateOptions) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) @@ -120,7 +120,7 @@ func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, options *v1. 
} // Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &corev1.ServiceAccount{}) @@ -131,7 +131,7 @@ func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.P } // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. -func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest) (result *authenticationv1.TokenRequest, err error) { +func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts v1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { obj, err := c.Fake. Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index f4832928352..c6ecda1f0d3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -38,14 +38,14 @@ type LimitRangesGetter interface { // LimitRangeInterface has methods to work with LimitRange resources. type LimitRangeInterface interface { - Create(context.Context, *v1.LimitRange) (*v1.LimitRange, error) - Update(context.Context, *v1.LimitRange) (*v1.LimitRange, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.LimitRange, error) + Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error) + Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) LimitRangeExpansion } @@ -109,11 +109,12 @@ func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. 
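The non-fake typed clients change in the same way: each interface method now names its parameters and carries an explicit options struct, and the implementations pass that struct to the request builder. A caller-side sketch against a real clientset, where the kubeconfig path and field-manager name are illustrative:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig; the path is illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	lr := &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}

	// CreateOptions travels with the request; FieldManager and DryRun are the
	// fields most callers reach for.
	created, err := client.CoreV1().LimitRanges("default").Create(
		context.TODO(),
		lr,
		metav1.CreateOptions{FieldManager: "example-controller", DryRun: []string{metav1.DryRunAll}},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("dry-run create accepted:", created.Name)
}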
-func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Post(). Namespace(c.ns). Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). Body(limitRange). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange) (re } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Put(). Namespace(c.ns). Resource("limitranges"). Name(limitRange.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(limitRange). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *limitRanges) DeleteCollection(ctx context.Context, options *metav1.Dele } // Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { +func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Patch(pt). Namespace(c.ns). Resource("limitranges"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index b6dac34b073..afd4237bfc8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -38,14 +38,14 @@ type NamespacesGetter interface { // NamespaceInterface has methods to work with Namespace resources. 
type NamespaceInterface interface { - Create(context.Context, *v1.Namespace) (*v1.Namespace, error) - Update(context.Context, *v1.Namespace) (*v1.Namespace, error) - UpdateStatus(context.Context, *v1.Namespace) (*v1.Namespace, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Namespace, error) + Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) + Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) NamespaceExpansion } @@ -104,10 +104,11 @@ func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch. } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Post(). Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace) (resul } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). Do(ctx). Into(result) @@ -128,13 +130,13 @@ func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace) (resul // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). Do(ctx). 
Into(result) @@ -152,12 +154,13 @@ func (c *namespaces) Delete(ctx context.Context, name string, options *metav1.De } // Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { +func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Patch(pt). Resource("namespaces"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 44dadcbd048..f90657fd180 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -38,15 +38,15 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. type NodeInterface interface { - Create(context.Context, *v1.Node) (*v1.Node, error) - Update(context.Context, *v1.Node) (*v1.Node, error) - UpdateStatus(context.Context, *v1.Node) (*v1.Node, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Node, error) + Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) + Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) NodeExpansion } @@ -105,10 +105,11 @@ func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Inter } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Post(). Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *nodes) Create(ctx context.Context, node *v1.Node) (result *v1.Node, err } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. 
-func (c *nodes) Update(ctx context.Context, node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *nodes) Update(ctx context.Context, node *v1.Node) (result *v1.Node, err // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *nodes) DeleteCollection(ctx context.Context, options *metav1.DeleteOpti } // Patch applies the patch and returns the patched node. -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { +func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Patch(pt). Resource("nodes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index a593a56aeec..9392aa0e744 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -38,15 +38,15 @@ type PersistentVolumesGetter interface { // PersistentVolumeInterface has methods to work with PersistentVolume resources. 
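The recurring VersionedParams(&opts, scheme.ParameterCodec) line is what puts the new options on the wire: the struct is encoded into URL query parameters on the request the builder assembles, so DryRun: []string{"All"} travels as dryRun=All. A sketch from the caller's side, using an illustrative cordon helper:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// cordonNode marks a node unschedulable. When dryRun is true, the generated
// client sends PUT /api/v1/nodes/<name> with dryRun=All as a query parameter,
// so the server validates the update without persisting it.
func cordonNode(client kubernetes.Interface, node *corev1.Node, dryRun bool) (*corev1.Node, error) {
	node.Spec.Unschedulable = true

	opts := metav1.UpdateOptions{}
	if dryRun {
		opts.DryRun = []string{metav1.DryRunAll}
	}
	return client.CoreV1().Nodes().Update(context.TODO(), node, opts)
}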
type PersistentVolumeInterface interface { - Create(context.Context, *v1.PersistentVolume) (*v1.PersistentVolume, error) - Update(context.Context, *v1.PersistentVolume) (*v1.PersistentVolume, error) - UpdateStatus(context.Context, *v1.PersistentVolume) (*v1.PersistentVolume, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.PersistentVolume, error) + Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) + Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) PersistentVolumeExpansion } @@ -105,10 +105,11 @@ func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Post(). Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.Per } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.Per // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *persistentVolumes) DeleteCollection(ctx context.Context, options *metav } // Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Patch(pt). Resource("persistentvolumes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 7da6de09a6f..f91db21e823 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -38,15 +38,15 @@ type PersistentVolumeClaimsGetter interface { // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
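Alongside the PatchOptions parameter, the generated Patch methods also swap the builder calls so Name comes before SubResource; the request URL appears to be assembled the same way in either order, so this reads as a consistency cleanup rather than a behavioral change. A sketch of a subresource patch through the variadic parameter, with an illustrative status payload:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchPVStatus patches the "status" subresource of a PersistentVolume via the
// variadic subresources argument; the status message is illustrative.
func patchPVStatus(client kubernetes.Interface, pvName string) error {
	patch := []byte(`{"status":{"message":"manually annotated"}}`)

	_, err := client.CoreV1().PersistentVolumes().Patch(
		context.TODO(),
		pvName,
		types.MergePatchType,
		patch,
		metav1.PatchOptions{},
		"status", // results in PATCH /api/v1/persistentvolumes/<name>/status
	)
	if err != nil {
		return fmt.Errorf("patching %s/status: %w", pvName, err)
	}
	return nil
}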
type PersistentVolumeClaimInterface interface { - Create(context.Context, *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Update(context.Context, *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - UpdateStatus(context.Context, *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error) + Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) + Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } @@ -110,11 +110,12 @@ func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOpti } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Post(). Namespace(c.ns). Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeCla } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(persistentVolumeClaim.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). Do(ctx). 
Into(result) @@ -136,14 +138,14 @@ func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeCla // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(persistentVolumeClaim.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, options * } // Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("persistentvolumeclaims"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 3a0fbbdb718..cc143a38a30 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -38,17 +38,17 @@ type PodsGetter interface { // PodInterface has methods to work with Pod resources. 
type PodInterface interface { - Create(context.Context, *v1.Pod) (*v1.Pod, error) - Update(context.Context, *v1.Pod) (*v1.Pod, error) - UpdateStatus(context.Context, *v1.Pod) (*v1.Pod, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Pod, error) + Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) + Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) - UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (*v1.EphemeralContainers, error) PodExpansion } @@ -113,11 +113,12 @@ func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interf } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(ctx context.Context, pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Post(). Namespace(c.ns). Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). Do(ctx). Into(result) @@ -125,12 +126,13 @@ func (c *pods) Create(ctx context.Context, pod *v1.Pod) (result *v1.Pod, err err } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(ctx context.Context, pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(pod.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). Do(ctx). Into(result) @@ -139,14 +141,14 @@ func (c *pods) Update(ctx context.Context, pod *v1.Pod) (result *v1.Pod, err err // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
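PodInterface gets the same treatment, including UpdateStatus and UpdateEphemeralContainers. Since status writes are where conflict retries usually live, here is a sketch of a typical retry loop adapted to the new UpdateStatus signature; the condition being appended is illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// markPodReady re-reads the pod and appends a status condition, retrying on
// resource-version conflicts.
func markPodReady(client kubernetes.Interface, namespace, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
			Type:   corev1.PodReady,
			Status: corev1.ConditionTrue,
		})
		// UpdateStatus now takes metav1.UpdateOptions, mirroring Update.
		_, err = client.CoreV1().Pods(namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
		return err
	})
}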
- -func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(pod.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). Do(ctx). Into(result) @@ -181,13 +183,14 @@ func (c *pods) DeleteCollection(ctx context.Context, options *metav1.DeleteOptio } // Patch applies the patch and returns the patched pod. -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { +func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Patch(pt). Namespace(c.ns). Resource("pods"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -209,13 +212,14 @@ func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, optio } // UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. -func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { +func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (result *v1.EphemeralContainers, err error) { result = &v1.EphemeralContainers{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(podName). SubResource("ephemeralcontainers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ephemeralContainers). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index a1114403e21..22b427232e4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -38,14 +38,14 @@ type PodTemplatesGetter interface { // PodTemplateInterface has methods to work with PodTemplate resources. 
type PodTemplateInterface interface { - Create(context.Context, *v1.PodTemplate) (*v1.PodTemplate, error) - Update(context.Context, *v1.PodTemplate) (*v1.PodTemplate, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.PodTemplate, error) + Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error) + Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) PodTemplateExpansion } @@ -109,11 +109,12 @@ func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watc } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podTemplate). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate) } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("podtemplates"). Name(podTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podTemplate). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *podTemplates) DeleteCollection(ctx context.Context, options *metav1.Del } // Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("podtemplates"). - SubResource(subresources...). Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 7b4bbc94429..1ecbdaf775a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -39,17 +39,17 @@ type ReplicationControllersGetter interface { // ReplicationControllerInterface has methods to work with ReplicationController resources. type ReplicationControllerInterface interface { - Create(context.Context, *v1.ReplicationController) (*v1.ReplicationController, error) - Update(context.Context, *v1.ReplicationController) (*v1.ReplicationController, error) - UpdateStatus(context.Context, *v1.ReplicationController) (*v1.ReplicationController, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ReplicationController, error) + Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) + Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ReplicationControllerExpansion } @@ -114,11 +114,12 @@ func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOpti } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Post(). 
Namespace(c.ns). Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). Do(ctx). Into(result) @@ -126,12 +127,13 @@ func (c *replicationControllers) Create(ctx context.Context, replicationControll } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). Name(replicationController.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). Do(ctx). Into(result) @@ -140,14 +142,14 @@ func (c *replicationControllers) Update(ctx context.Context, replicationControll // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). Name(replicationController.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). Do(ctx). Into(result) @@ -182,13 +184,14 @@ func (c *replicationControllers) DeleteCollection(ctx context.Context, options * } // Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicationcontrollers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -210,13 +213,14 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). Name(replicationControllerName). 
SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 982a94ad1d8..12dc525eeb3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -38,15 +38,15 @@ type ResourceQuotasGetter interface { // ResourceQuotaInterface has methods to work with ResourceQuota resources. type ResourceQuotaInterface interface { - Create(context.Context, *v1.ResourceQuota) (*v1.ResourceQuota, error) - Update(context.Context, *v1.ResourceQuota) (*v1.ResourceQuota, error) - UpdateStatus(context.Context, *v1.ResourceQuota) (*v1.ResourceQuota, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ResourceQuota, error) + Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) + Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) ResourceQuotaExpansion } @@ -110,11 +110,12 @@ func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (wa } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Post(). Namespace(c.ns). Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQ } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
-func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). Name(resourceQuota.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQ // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). Name(resourceQuota.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *resourceQuotas) DeleteCollection(ctx context.Context, options *metav1.D } // Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourcequotas"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 9168e598087..fb9ff7fb328 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -38,14 +38,14 @@ type SecretsGetter interface { // SecretInterface has methods to work with Secret resources. 
type SecretInterface interface { - Create(context.Context, *v1.Secret) (*v1.Secret, error) - Update(context.Context, *v1.Secret) (*v1.Secret, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) + Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error) + Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) SecretExpansion } @@ -109,11 +109,12 @@ func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Int } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(ctx context.Context, secret *v1.Secret) (result *v1.Secret, err error) { +func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Post(). Namespace(c.ns). Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(secret). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *secrets) Create(ctx context.Context, secret *v1.Secret) (result *v1.Sec } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(ctx context.Context, secret *v1.Secret) (result *v1.Secret, err error) { +func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Put(). Namespace(c.ns). Resource("secrets"). Name(secret.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(secret). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *secrets) DeleteCollection(ctx context.Context, options *metav1.DeleteOp } // Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { +func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Patch(pt). Namespace(c.ns). Resource("secrets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go index bd8431212ee..56630c7cdf3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -38,14 +38,14 @@ type ServicesGetter interface { // ServiceInterface has methods to work with Service resources. type ServiceInterface interface { - Create(context.Context, *v1.Service) (*v1.Service, error) - Update(context.Context, *v1.Service) (*v1.Service, error) - UpdateStatus(context.Context, *v1.Service) (*v1.Service, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Service, error) + Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) + Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) ServiceExpansion } @@ -109,11 +109,12 @@ func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.In } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(ctx context.Context, service *v1.Service) (result *v1.Service, err error) { +func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Post(). Namespace(c.ns). Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *services) Create(ctx context.Context, service *v1.Service) (result *v1. } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(ctx context.Context, service *v1.Service) (result *v1.Service, err error) { +func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). Name(service.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). Do(ctx). Into(result) @@ -135,14 +137,14 @@ func (c *services) Update(ctx context.Context, service *v1.Service) (result *v1. // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *services) UpdateStatus(ctx context.Context, service *v1.Service) (result *v1.Service, err error) { +func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). Name(service.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *services) Delete(ctx context.Context, name string, options *metav1.Dele } // Patch applies the patch and returns the patched service. -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { +func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Patch(pt). Namespace(c.ns). Resource("services"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index 2f101c1e7e7..671c3726f49 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -39,15 +39,15 @@ type ServiceAccountsGetter interface { // ServiceAccountInterface has methods to work with ServiceAccount resources. type ServiceAccountInterface interface { - Create(context.Context, *v1.ServiceAccount) (*v1.ServiceAccount, error) - Update(context.Context, *v1.ServiceAccount) (*v1.ServiceAccount, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ServiceAccount, error) + Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error) + Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) - CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) + CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error) ServiceAccountExpansion } @@ -112,11 +112,12 @@ func (c *serviceAccounts) Watch(ctx 
context.Context, opts metav1.ListOptions) (w } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Post(). Namespace(c.ns). Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). Body(serviceAccount). Do(ctx). Into(result) @@ -124,12 +125,13 @@ func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.Service } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Put(). Namespace(c.ns). Resource("serviceaccounts"). Name(serviceAccount.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(serviceAccount). Do(ctx). Into(result) @@ -164,13 +166,14 @@ func (c *serviceAccounts) DeleteCollection(ctx context.Context, options *metav1. } // Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Patch(pt). Namespace(c.ns). Resource("serviceaccounts"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -178,13 +181,14 @@ func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.Patch } // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. -func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest) (result *authenticationv1.TokenRequest, err error) { +func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { result = &authenticationv1.TokenRequest{} err = c.client.Post(). Namespace(c.ns). Resource("serviceaccounts"). Name(serviceAccountName). SubResource("token"). + VersionedParams(&opts, scheme.ParameterCodec). Body(tokenRequest). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go index 9691684c84c..6f39aa71413 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -38,14 +38,14 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. type EndpointSliceInterface interface { - Create(context.Context, *v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) - Update(context.Context, *v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (*v1alpha1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (*v1alpha1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EndpointSlice, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) EndpointSliceExpansion } @@ -109,11 +109,12 @@ func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Post(). Namespace(c.ns). Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.End } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Put(). Namespace(c.ns). Resource("endpointslices"). Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). Do(ctx). 
Into(result) @@ -161,13 +163,14 @@ func (c *endpointSlices) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpointslices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go index 2eaceb3a36d..61d6a5873fc 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go @@ -81,7 +81,7 @@ func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) @@ -92,7 +92,7 @@ func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1 } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) @@ -119,7 +119,7 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.EndpointSlice{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 4829f1dc169..2989fc953bf 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -38,14 +38,14 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. type EndpointSliceInterface interface { - Create(context.Context, *v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) - Update(context.Context, *v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) EndpointSliceExpansion } @@ -109,11 +109,12 @@ func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Post(). Namespace(c.ns). Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.Endp } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Put(). Namespace(c.ns). Resource("endpointslices"). 
Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *endpointSlices) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpointslices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 9e2f06833f7..c67678331e6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -81,7 +81,7 @@ func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) @@ -92,7 +92,7 @@ func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1. } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) @@ -119,7 +119,7 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 38b46fd17a7..22eed88007f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -38,14 +38,14 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(context.Context, *v1beta1.Event) (*v1beta1.Event, error) - Update(context.Context, *v1beta1.Event) (*v1beta1.Event, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Event, error) + Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error) + Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) EventExpansion } @@ -109,11 +109,12 @@ func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interfac } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Post(). Namespace(c.ns). Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *events) Create(ctx context.Context, event *v1beta1.Event) (result *v1be } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Put(). Namespace(c.ns). Resource("events"). Name(event.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *events) DeleteCollection(ctx context.Context, options *v1.DeleteOptions } // Patch applies the patch and returns the patched event. 
-func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { +func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Patch(pt). Namespace(c.ns). Resource("events"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index 2ce93d7a6a8..41c6865abeb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -81,7 +81,7 @@ func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) @@ -92,7 +92,7 @@ func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event) (result * } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 2b1440fda62..d1aa54c989e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -38,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. 
type DaemonSetInterface interface { - Create(context.Context, *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Update(context.Context, *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - UpdateStatus(context.Context, *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) + Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) DaemonSetExpansion } @@ -110,11 +110,12 @@ func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet) ( } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet) ( // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *daemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 28479029e19..70c07fa9649 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -38,17 +38,17 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(context.Context, *v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) + Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale) 
(*v1beta1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) DeploymentExpansion } @@ -113,11 +113,12 @@ func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -125,12 +126,13 @@ func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -139,14 +141,14 @@ func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). Do(ctx). Into(result) @@ -181,13 +183,14 @@ func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -209,13 +212,14 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
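For reviewers, a minimal caller-side sketch of the new deployment signatures. This is not generated code from the patch; it assumes a configured *kubernetes.Clientset and an existing extensions/v1beta1 Deployment object, both of which are placeholders.

    package example

    import (
        "context"

        extv1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createThenUpdate shows the extra options arguments now required by
    // Create and Update on the extensions/v1beta1 deployments client.
    func createThenUpdate(ctx context.Context, cs kubernetes.Interface, deploy *extv1beta1.Deployment) error {
        client := cs.ExtensionsV1beta1().Deployments(deploy.Namespace)

        created, err := client.Create(ctx, deploy, metav1.CreateOptions{})
        if err != nil {
            return err
        }

        // Illustrative mutation; any spec or metadata change works the same way.
        if created.Labels == nil {
            created.Labels = map[string]string{}
        }
        created.Labels["example"] = "updated"

        _, err = client.Update(ctx, created, metav1.UpdateOptions{})
        return err
    }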
-func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deploymentName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 44f3a5e9290..dfce4a041da 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -81,7 +81,7 @@ func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -92,7 +92,7 @@ func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSe } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -104,7 +104,7 @@ func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSe // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index 2c7004db865..737f690e7d0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -81,7 +81,7 @@ func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -92,7 +92,7 @@ func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deploy } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -104,7 +104,7 @@ func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deploy // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) @@ -153,7 +153,7 @@ func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
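A sketch of how a unit test against the fake clientset picks up the new arguments; as the fake diffs above show, the options are accepted but the recorded actions are unchanged. The test and object names here are made up.

    package example

    import (
        "context"
        "testing"

        extv1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestDaemonSetCreateAcceptsOptions(t *testing.T) {
        cs := fake.NewSimpleClientset()
        ds := &extv1beta1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "ds", Namespace: "default"}}

        // The fake now requires CreateOptions, mirroring the real client.
        if _, err := cs.ExtensionsV1beta1().DaemonSets("default").Create(context.TODO(), ds, metav1.CreateOptions{}); err != nil {
            t.Fatalf("create failed: %v", err)
        }

        // The recorded action is still a plain create; the options are not tracked.
        if got := cs.Actions(); len(got) != 1 || got[0].GetVerb() != "create" {
            t.Fatalf("unexpected actions: %v", got)
        }
    }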
-func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 119f6f3263f..4dc0f1694e1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -81,7 +81,7 @@ func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -92,7 +92,7 @@ func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (r } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -104,7 +104,7 @@ func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (r // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) @@ -131,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index 403f44756ed..5997132d620 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -81,7 +81,7 @@ func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) @@ -92,7 +92,7 @@ func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1 } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) @@ -119,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index a659e8c836f..5ade0028ab6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -77,7 +77,7 @@ func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. 
-func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 62293014ba1..48e1f629074 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -81,7 +81,7 @@ func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -92,7 +92,7 @@ func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.Replic } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -104,7 +104,7 @@ func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.Replic // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) @@ -153,7 +153,7 @@ func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, o } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index cf5b88c6904..7c162c67c0e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -38,15 +38,15 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. 
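The main payoff of threading opts through VersionedParams is that request-scoped options such as dry-run and field manager now reach the server as query parameters. A hedged sketch follows; the clientset, the ingress object, and the manager name are placeholders.

    package example

    import (
        "context"

        extv1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // dryRunUpdate validates an ingress update server-side without persisting it.
    func dryRunUpdate(ctx context.Context, cs kubernetes.Interface, ing *extv1beta1.Ingress) error {
        _, err := cs.ExtensionsV1beta1().Ingresses(ing.Namespace).Update(ctx, ing, metav1.UpdateOptions{
            DryRun:       []string{metav1.DryRunAll},
            FieldManager: "example-editor", // placeholder manager name
        })
        return err
    }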
type IngressInterface interface { - Create(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) + Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) IngressExpansion } @@ -110,11 +110,12 @@ func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Post(). Namespace(c.ns). Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (resul } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (resul // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). 
Name(ingress.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *ingresses) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Patch(pt). Namespace(c.ns). Resource("ingresses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index ab5ef66dd89..92ff833105e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -38,14 +38,14 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. type NetworkPolicyInterface interface { - Create(context.Context, *v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Update(context.Context, *v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -109,11 +109,12 @@ func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Post(). Namespace(c.ns). Resource("networkpolicies"). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.Net } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Put(). Namespace(c.ns). Resource("networkpolicies"). Name(networkPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *networkPolicies) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Patch(pt). Namespace(c.ns). Resource("networkpolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index 7e1da69c0df..e57b8de71a6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -38,14 +38,14 @@ type PodSecurityPoliciesGetter interface { // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
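Cluster-scoped clients such as PodSecurityPolicies get the same treatment, just without the namespace. A sketch of the new Patch call; the policy name and patch body are illustrative.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    // labelPolicy adds a label to a PodSecurityPolicy via a strategic merge patch.
    func labelPolicy(ctx context.Context, cs kubernetes.Interface, name string) error {
        patch := []byte(`{"metadata":{"labels":{"audited":"true"}}}`)
        _, err := cs.ExtensionsV1beta1().PodSecurityPolicies().Patch(
            ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
        return err
    }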
type PodSecurityPolicyInterface interface { - Create(context.Context, *v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(context.Context, *v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) + Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) PodSecurityPolicyExpansion } @@ -104,10 +104,11 @@ func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Post(). Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1b } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Put(). Resource("podsecuritypolicies"). Name(podSecurityPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched podSecurityPolicy. 
-func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Patch(pt). Resource("podsecuritypolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 6fb352ef0be..9db0cbfdba7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -38,17 +38,17 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(context.Context, *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Update(context.Context, *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - UpdateStatus(context.Context, *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) ReplicaSetExpansion } @@ -113,11 +113,12 @@ func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
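The scale subresource follows the same pattern: GetScale keeps GetOptions and UpdateScale now takes UpdateOptions. A sketch assuming an existing ReplicaSet; the namespace and name are placeholders.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // resize bumps a ReplicaSet's replica count through the scale subresource.
    func resize(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
        client := cs.ExtensionsV1beta1().ReplicaSets(ns)

        scale, err := client.GetScale(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas

        _, err = client.UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
        return err
    }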
-func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -125,12 +126,13 @@ func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -139,14 +141,14 @@ func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). Do(ctx). Into(result) @@ -181,13 +183,14 @@ func (c *replicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -209,13 +212,14 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go index a0a7572fceb..f3dd97f195c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go @@ -77,7 +77,7 @@ func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowS } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowS // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1alpha1.FlowSchema{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go index 6f8503d4720..d5b4e7998bd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go @@ -77,7 +77,7 @@ func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.Lis } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLe } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLe // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, } // Patch applies the patch and returns the patched priorityLevelConfiguration. 
-func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go index cb4768136d1..ca209f0979e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go @@ -38,15 +38,15 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(context.Context, *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - Update(context.Context, *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - UpdateStatus(context.Context, *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.FlowSchema, error) + Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) FlowSchemaExpansion } @@ -105,10 +105,11 @@ func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Post(). Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). Do(ctx). 
Into(result) @@ -116,11 +117,12 @@ func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchem } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchem // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *flowSchemas) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Patch(pt). Resource("flowschemas"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go index 3ccb0c596a1..413fea4bc0e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -38,15 +38,15 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
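The new flowcontrol v1alpha1 clients are generated the same way, so status writes take UpdateOptions too. A sketch; the FlowSchema name is a placeholder and the status is resubmitted unmodified purely to show the call shape.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // touchFlowSchemaStatus re-submits a FlowSchema's status with explicit options.
    func touchFlowSchemaStatus(ctx context.Context, cs kubernetes.Interface, name string) error {
        fs, err := cs.FlowcontrolV1alpha1().FlowSchemas().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        _, err = cs.FlowcontrolV1alpha1().FlowSchemas().UpdateStatus(ctx, fs, metav1.UpdateOptions{})
        return err
    }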
type PriorityLevelConfigurationInterface interface { - Create(context.Context, *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - Update(context.Context, *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - UpdateStatus(context.Context, *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } @@ -105,10 +105,11 @@ func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOpt } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Post(). Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelC } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelC // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opti } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Patch(pt). Resource("prioritylevelconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 56d28d07092..c251b667516 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -81,7 +81,7 @@ func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.CreateOptions) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) @@ -92,7 +92,7 @@ func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *network } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.UpdateOptions) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) @@ -119,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &networkingv1.NetworkPolicy{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 690ba435591..4fa19860387 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -38,14 +38,14 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
type NetworkPolicyInterface interface { - Create(context.Context, *v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Update(context.Context, *v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -109,11 +109,12 @@ func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (w } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Post(). Namespace(c.ns). Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). Do(ctx). Into(result) @@ -121,12 +122,13 @@ func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkP } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Put(). Namespace(c.ns). Resource("networkpolicies"). Name(networkPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). Do(ctx). Into(result) @@ -161,13 +163,14 @@ func (c *networkPolicies) DeleteCollection(ctx context.Context, options *metav1. } // Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Patch(pt). Namespace(c.ns). 
Resource("networkpolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 3def9951fd3..130d037d569 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -81,7 +81,7 @@ func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -92,7 +92,7 @@ func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (r } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -104,7 +104,7 @@ func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (r // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) @@ -131,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index 4de1206cf72..b8420c0dc77 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -38,15 +38,15 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(context.Context, *v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) + Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) IngressExpansion } @@ -110,11 +110,12 @@ func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Post(). Namespace(c.ns). Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress) (resul } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). 
Into(result) @@ -136,14 +138,14 @@ func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress) (resul // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *ingresses) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Patch(pt). Namespace(c.ns). Resource("ingresses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 920df4f77a3..1495ba72a9d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -77,7 +77,7 @@ func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1. } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched runtimeClass. 
-func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index c4a1af5f23f..c60f24211a8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -38,14 +38,14 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. type RuntimeClassInterface interface { - Create(context.Context, *v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Update(context.Context, *v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) RuntimeClassExpansion } @@ -104,10 +104,11 @@ func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Post(). Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.Runt } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Put(). Resource("runtimeclasses"). Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *runtimeClasses) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Patch(pt). Resource("runtimeclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index 40fd252a33d..1eef57f96eb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -77,7 +77,7 @@ func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.R } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched runtimeClass. 
-func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index b38ea56afe2..7cdd58f2927 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -38,14 +38,14 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. type RuntimeClassInterface interface { - Create(context.Context, *v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Update(context.Context, *v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) RuntimeClassExpansion } @@ -104,10 +104,11 @@ func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Post(). Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.Runti } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Put(). Resource("runtimeclasses"). Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *runtimeClasses) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Patch(pt). Resource("runtimeclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 8246c902359..bd7a82819c9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -81,7 +81,7 @@ func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOption } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -92,7 +92,7 @@ func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudg } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -104,7 +104,7 @@ func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudg // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -131,7 +131,7 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index b60d2a2a598..a1f850d2be6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -77,7 +77,7 @@ func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, options } // Patch applies the patch and returns the patched podSecurityPolicy. 
-func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 9fed91bf7db..e82a6d20e58 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -38,15 +38,15 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. type PodDisruptionBudgetInterface interface { - Create(context.Context, *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Update(context.Context, *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - UpdateStatus(context.Context, *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } @@ -110,11 +110,12 @@ func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) ( } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Post(). Namespace(c.ns). Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget * } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Put(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(podDisruptionBudget.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Put(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(podDisruptionBudget.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, options *v1 } // Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Patch(pt). Namespace(c.ns). Resource("poddisruptionbudgets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index 2add7c59008..7cadf263517 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -38,14 +38,14 @@ type PodSecurityPoliciesGetter interface { // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. type PodSecurityPolicyInterface interface { - Create(context.Context, *v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(context.Context, *v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) + Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) PodSecurityPolicyExpansion } @@ -104,10 +104,11 @@ func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Post(). Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1b } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Put(). 
Resource("podsecuritypolicies"). Name(podSecurityPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, options *v1. } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Patch(pt). Resource("podsecuritypolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 9fc2968c8d2..467c6ddb029 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -38,14 +38,14 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. type ClusterRoleInterface interface { - Create(context.Context, *v1.ClusterRole) (*v1.ClusterRole, error) - Update(context.Context, *v1.ClusterRole) (*v1.ClusterRole, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ClusterRole, error) + Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error) + Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) ClusterRoleExpansion } @@ -104,10 +104,11 @@ func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watc } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Post(). Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). Do(ctx). 
Into(result) @@ -115,11 +116,12 @@ func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole) } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Put(). Resource("clusterroles"). Name(clusterRole.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *clusterRoles) DeleteCollection(ctx context.Context, options *metav1.Del } // Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Patch(pt). Resource("clusterroles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index a0d282b05ba..19ace55f513 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -38,14 +38,14 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
type ClusterRoleBindingInterface interface { - Create(context.Context, *v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Update(context.Context, *v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } @@ -104,10 +104,11 @@ func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Post(). Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1 } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Put(). Resource("clusterrolebindings"). Name(clusterRoleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *met } // Patch applies the patch and returns the patched clusterRoleBinding. 
-func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Patch(pt). Resource("clusterrolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 947552751f9..2eec9defdaa 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -77,7 +77,7 @@ func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.CreateOptions) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *rbacv1.Clust } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.UpdateOptions) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.Del } // Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. 
 		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &rbacv1.ClusterRole{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
index 7bc37a62791..c7943659f66 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
@@ -77,7 +77,7 @@ func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.CreateOptions) (result *rbacv1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.UpdateOptions) (result *rbacv1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &rbacv1.ClusterRoleBinding{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
index eee46d44dce..1d3cee64ed8 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
@@ -81,7 +81,7 @@ func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter
 }
 
 // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *FakeRoles) Create(ctx context.Context, role *rbacv1.Role) (result *rbacv1.Role, err error) {
+func (c *FakeRoles) Create(ctx context.Context, role *rbacv1.Role, opts v1.CreateOptions) (result *rbacv1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &rbacv1.Role{})
 
@@ -92,7 +92,7 @@ func (c *FakeRoles) Create(ctx context.Context, role *rbacv1.Role) (result *rbac
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *FakeRoles) Update(ctx context.Context, role *rbacv1.Role) (result *rbacv1.Role, err error) {
+func (c *FakeRoles) Update(ctx context.Context, role *rbacv1.Role, opts v1.UpdateOptions) (result *rbacv1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &rbacv1.Role{})
 
@@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOpti
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) {
+func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &rbacv1.Role{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
index 18412907833..dcdc0897512 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
@@ -81,7 +81,7 @@ func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watc
 }
 
 // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.CreateOptions) (result *rbacv1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{})
 
@@ -92,7 +92,7 @@ func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *rbacv1.RoleB
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.UpdateOptions) (result *rbacv1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{})
 
@@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.Del
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &rbacv1.RoleBinding{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
index b50b1d8fd70..64c08912b0a 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
@@ -38,14 +38,14 @@ type RolesGetter interface {
 
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(context.Context, *v1.Role) (*v1.Role, error)
-	Update(context.Context, *v1.Role) (*v1.Role, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Role, error)
+	Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error)
+	Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error)
 	RoleExpansion
 }
 
@@ -109,11 +109,12 @@ func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Inter
 }
 
 // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(ctx context.Context, role *v1.Role) (result *v1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roles) Create(ctx context.Context, role *v1.Role) (result *v1.Role, err
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(ctx context.Context, role *v1.Role) (result *v1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roles) DeleteCollection(ctx context.Context, options *metav1.DeleteOpti
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
index 714069ffc57..70334e356c9 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
@@ -38,14 +38,14 @@ type RoleBindingsGetter interface {
 
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(context.Context, *v1.RoleBinding) (*v1.RoleBinding, error)
-	Update(context.Context, *v1.RoleBinding) (*v1.RoleBinding, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.RoleBinding, error)
+	Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error)
 	RoleBindingExpansion
 }
 
@@ -109,11 +109,12 @@ func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watc
 }
 
 // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding)
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roleBindings) DeleteCollection(ctx context.Context, options *metav1.Del
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
index a6a305524f5..9397cd8d427 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
@@ -38,14 +38,14 @@ type ClusterRolesGetter interface {
 
 // ClusterRoleInterface has methods to work with ClusterRole resources.
 type ClusterRoleInterface interface {
-	Create(context.Context, *v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
-	Update(context.Context, *v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error)
+	Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error)
+	Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error)
 	ClusterRoleExpansion
 }
 
@@ -104,10 +104,11 @@ func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Post().
 		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.Cluster
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Put().
 		Resource("clusterroles").
 		Name(clusterRole.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *clusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Patch(pt).
 		Resource("clusterroles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
index 0e87ee3175d..3d36d87b906 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
@@ -38,14 +38,14 @@ type ClusterRoleBindingsGetter interface {
 
 // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
 type ClusterRoleBindingInterface interface {
-	Create(context.Context, *v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
-	Update(context.Context, *v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
+	Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error)
+	Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
 	ClusterRoleBindingExpansion
 }
 
@@ -104,10 +104,11 @@ func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (w
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Post().
 		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Put().
 		Resource("clusterrolebindings").
 		Name(clusterRoleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Patch(pt).
 		Resource("clusterrolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
index ec698872a95..966c601f3b2 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
@@ -77,7 +77,7 @@ func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watc
 }
 
 // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.Clu
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.Del
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
index 65e0fe08b27..c1d43dcd888 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
@@ -77,7 +77,7 @@ func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
index 76c2bb1dfb7..63579bbbb74 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
@@ -81,7 +81,7 @@ func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter
 }
 
 // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
 
@@ -92,7 +92,7 @@ func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role) (result *v1
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
 
@@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOpti
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
+func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
index af5a93411dd..21bbb6c8dcb 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
@@ -81,7 +81,7 @@ func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watc
 }
 
 // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
 
@@ -92,7 +92,7 @@ func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.Rol
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
 
@@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.Del
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
index 806ce3ca328..b2bd377961b 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
@@ -38,14 +38,14 @@ type RolesGetter interface {
 
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(context.Context, *v1alpha1.Role) (*v1alpha1.Role, error)
-	Update(context.Context, *v1alpha1.Role) (*v1alpha1.Role, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.Role, error)
+	Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error)
+	Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error)
 	RoleExpansion
 }
 
@@ -109,11 +109,12 @@ func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface
 }
 
 // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(ctx context.Context, role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roles) Create(ctx context.Context, role *v1alpha1.Role) (result *v1alph
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(ctx context.Context, role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions,
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
index fe40777a8d0..859515bf091 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
@@ -38,14 +38,14 @@ type RoleBindingsGetter interface {
 
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(context.Context, *v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Update(context.Context, *v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error)
+	Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error)
 	RoleBindingExpansion
 }
 
@@ -109,11 +109,12 @@ func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBin
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
index d118817484c..803b892e746 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
@@ -38,14 +38,14 @@ type ClusterRolesGetter interface {
 
 // ClusterRoleInterface has methods to work with ClusterRole resources.
 type ClusterRoleInterface interface {
-	Create(context.Context, *v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Update(context.Context, *v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.ClusterRole, error)
+	Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error)
+	Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error)
 	ClusterRoleExpansion
 }
 
@@ -104,10 +104,11 @@ func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Post().
 		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterR
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Put().
 		Resource("clusterroles").
 		Name(clusterRole.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *clusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Patch(pt).
 		Resource("clusterroles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
index 29f17e7e9b7..81e07f33226 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
@@ -38,14 +38,14 @@ type ClusterRoleBindingsGetter interface {
 
 // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
 type ClusterRoleBindingInterface interface {
-	Create(context.Context, *v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Update(context.Context, *v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
+	Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error)
+	Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
 	ClusterRoleBindingExpansion
 }
 
@@ -104,10 +104,11 @@ func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (w
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Post().
 		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Put().
 		Resource("clusterrolebindings").
 		Name(clusterRoleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Patch(pt).
 		Resource("clusterrolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
index 2aa98ef990b..48d437cabaf 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
@@ -77,7 +77,7 @@ func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watc
 }
 
 // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.Clus
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.Del
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
+func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
index 9fffbc3a96f..b20a6fa6cf7 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
@@ -77,7 +77,7 @@ func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
 	obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index a6952c228ff..eec0ef6f898 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -81,7 +81,7 @@ func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) @@ -92,7 +92,7 @@ func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role) (result *v1b } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 5b43246dcfa..80f9638bcbd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -81,7 +81,7 @@ func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watc } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { +func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) @@ -92,7 +92,7 @@ func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.Role } // Update takes the representation of a roleBinding and updates it. 
-func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{})
 
@@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.Del
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
+func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
index 7b23a0ea1b0..3a0233265ca 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
@@ -38,14 +38,14 @@ type RolesGetter interface {
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(context.Context, *v1beta1.Role) (*v1beta1.Role, error)
-	Update(context.Context, *v1beta1.Role) (*v1beta1.Role, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Role, error)
+	Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error)
+	Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error)
 	RoleExpansion
 }
@@ -109,11 +109,12 @@ func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface
 }
 
 // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(ctx context.Context, role *v1beta1.Role) (result *v1beta1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roles) Create(ctx context.Context, role *v1beta1.Role) (result *v1beta1
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(ctx context.Context, role *v1beta1.Role) (result *v1beta1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions,
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
index 92e7746fbc1..a0cb0763d7d 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
@@ -38,14 +38,14 @@ type RoleBindingsGetter interface {
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(context.Context, *v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Update(context.Context, *v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.RoleBinding, error)
+	Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error)
 	RoleBindingExpansion
 }
@@ -109,11 +109,12 @@ func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBind
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *roleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
index f69280932ea..05c2d7ae66d 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
@@ -77,7 +77,7 @@ func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (w
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass) (result *schedulingv1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.CreateOptions) (result *schedulingv1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *schedul
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass) (result *schedulingv1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.UpdateOptions) (result *schedulingv1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *schedulingv1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &schedulingv1.PriorityClass{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
index 34123303680..394adf44606 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
@@ -38,14 +38,14 @@ type PriorityClassesGetter interface {
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(context.Context, *v1.PriorityClass) (*v1.PriorityClass, error)
-	Update(context.Context, *v1.PriorityClass) (*v1.PriorityClass, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.PriorityClass, error)
+	Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error)
 	PriorityClassExpansion
 }
@@ -104,10 +104,11 @@ func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (w
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.Priority
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *priorityClasses) DeleteCollection(ctx context.Context, options *metav1.
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
index e6001447a53..8b264e07acb 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
@@ -77,7 +77,7 @@ func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (w
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
index 529a19ff1a1..74a9b44ed5f 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
@@ -38,14 +38,14 @@ type PriorityClassesGetter interface {
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(context.Context, *v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Update(context.Context, *v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error)
+	Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error)
 	PriorityClassExpansion
 }
@@ -104,10 +104,11 @@ func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.Pr
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *priorityClasses) DeleteCollection(ctx context.Context, options *v1.Dele
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
index f0c1d12ee1d..7381e634507 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
@@ -77,7 +77,7 @@ func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (w
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) {
+func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
index 7ace1a00a06..80db5299b84 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
@@ -38,14 +38,14 @@ type PriorityClassesGetter interface {
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(context.Context, *v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Update(context.Context, *v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.PriorityClass, error)
+	Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error)
 	PriorityClassExpansion
 }
@@ -104,10 +104,11 @@ func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch
 }
 
 // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.Pri
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *priorityClasses) DeleteCollection(ctx context.Context, options *v1.Dele
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
index 4dbdd9a0161..00dc34d2c32 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
@@ -81,7 +81,7 @@ func (c *FakePodPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.
 }
 
 // Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{})
 
@@ -92,7 +92,7 @@ func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPres
 }
 
 // Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *FakePodPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *FakePodPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{})
 
@@ -119,7 +119,7 @@ func (c *FakePodPresets) DeleteCollection(ctx context.Context, options *v1.Delet
 }
 
 // Patch applies the patch and returns the patched podPreset.
-func (c *FakePodPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) {
+func (c *FakePodPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{})
 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
index d3e43ea037f..01eaa9a82ca 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
@@ -38,14 +38,14 @@ type PodPresetsGetter interface {
 // PodPresetInterface has methods to work with PodPreset resources.
 type PodPresetInterface interface {
-	Create(context.Context, *v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
-	Update(context.Context, *v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
-	Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.PodPreset, error)
+	Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error)
+	Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error)
+	Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error)
 	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error)
 	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error)
 	PodPresetExpansion
 }
@@ -109,11 +109,12 @@ func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte
 }
 
 // Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("podpresets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podPreset).
 		Do(ctx).
 		Into(result)
@@ -121,12 +122,13 @@ func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset)
 }
 
 // Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("podpresets").
 		Name(podPreset.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podPreset).
 		Do(ctx).
 		Into(result)
@@ -161,13 +163,14 @@ func (c *podPresets) DeleteCollection(ctx context.Context, options *v1.DeleteOpt
 }
 
 // Patch applies the patch and returns the patched podPreset.
-func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("podpresets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
index b4e9b4a62e8..6ecde5933cb 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
@@ -38,14 +38,14 @@ type CSINodesGetter interface {
 // CSINodeInterface has methods to work with CSINode resources.
 type CSINodeInterface interface {
-	Create(context.Context, *v1.CSINode) (*v1.CSINode, error)
-	Update(context.Context, *v1.CSINode) (*v1.CSINode, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.CSINode, error)
+	Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error)
+	Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error)
 	CSINodeExpansion
 }
@@ -104,10 +104,11 @@ func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode) (result *v1.CSINode, err error) {
+func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Post().
 		Resource("csinodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode) (result *v1.
 }
 
 // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode) (result *v1.CSINode, err error) {
+func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Put().
 		Resource("csinodes").
 		Name(cSINode.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *cSINodes) DeleteCollection(ctx context.Context, options *metav1.DeleteO
 }
 
 // Patch applies the patch and returns the patched cSINode.
-func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) {
+func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Patch(pt).
 		Resource("csinodes").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
index 283a9628fb1..94452160de8 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
@@ -77,7 +77,7 @@ func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 
 // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *FakeCSINodes) Create(ctx context.Context, cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) {
+func (c *FakeCSINodes) Create(ctx context.Context, cSINode *storagev1.CSINode, opts v1.CreateOptions) (result *storagev1.CSINode, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &storagev1.CSINode{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeCSINodes) Create(ctx context.Context, cSINode *storagev1.CSINode) (
 }
 
 // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *FakeCSINodes) Update(ctx context.Context, cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) {
+func (c *FakeCSINodes) Update(ctx context.Context, cSINode *storagev1.CSINode, opts v1.UpdateOptions) (result *storagev1.CSINode, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &storagev1.CSINode{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 
 // Patch applies the patch and returns the patched cSINode.
-func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.CSINode, err error) {
+func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &storagev1.CSINode{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
index 0c47a2dd784..2d5d1f2c76c 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
@@ -77,7 +77,7 @@ func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (wa
 }
 
 // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) {
+func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.CreateOptions) (result *storagev1.StorageClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &storagev1.StorageClass{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *storagev1
 }
 
 // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) {
+func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.UpdateOptions) (result *storagev1.StorageClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &storagev1.StorageClass{})
 	if obj == nil {
@@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, options *v1.D
 }
 
 // Patch applies the patch and returns the patched storageClass.
-func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.StorageClass, err error) {
+func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &storagev1.StorageClass{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
index 7ddd5893c2a..479bf6ff8c5 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
@@ -77,7 +77,7 @@ func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions)
 }
 
 // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) {
+func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.CreateOptions) (result *storagev1.VolumeAttachment, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *st
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) {
+func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (result *storagev1.VolumeAttachment, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{})
 	if obj == nil {
@@ -98,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *st
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment) (*storagev1.VolumeAttachment, error) {
+func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1.VolumeAttachment, error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &storagev1.VolumeAttachment{})
 	if obj == nil {
@@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v
 }
 
 // Patch applies the patch and returns the patched volumeAttachment.
-func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.VolumeAttachment, err error) {
+func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &storagev1.VolumeAttachment{})
 	if obj == nil {
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
index 1815804b6be..b23725cce9c 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
@@ -38,14 +38,14 @@ type StorageClassesGetter interface {
 // StorageClassInterface has methods to work with StorageClass resources.
 type StorageClassInterface interface {
-	Create(context.Context, *v1.StorageClass) (*v1.StorageClass, error)
-	Update(context.Context, *v1.StorageClass) (*v1.StorageClass, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.StorageClass, error)
+	Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error)
+	Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error)
 	StorageClassExpansion
 }
@@ -104,10 +104,11 @@ func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (wa
 }
 
 // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Post().
 		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
 		Do(ctx).
 		Into(result)
@@ -115,11 +116,12 @@ func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageCla
 }
 
 // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Put().
 		Resource("storageclasses").
 		Name(storageClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
 		Do(ctx).
 		Into(result)
@@ -152,12 +154,13 @@ func (c *storageClasses) DeleteCollection(ctx context.Context, options *metav1.D
 }
 
 // Patch applies the patch and returns the patched storageClass.
-func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Patch(pt).
 		Resource("storageclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
index 40a91b81f9e..1a466070e9e 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
@@ -38,15 +38,15 @@ type VolumeAttachmentsGetter interface {
 // VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
 type VolumeAttachmentInterface interface {
-	Create(context.Context, *v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	Update(context.Context, *v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	UpdateStatus(context.Context, *v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.VolumeAttachment, error)
+	Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error)
+	Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+	UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+	Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error)
 	List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error)
 	VolumeAttachmentExpansion
 }
@@ -105,10 +105,11 @@ func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions)
 }
 
 // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Post().
 		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
 		Do(ctx).
 		Into(result)
@@ -116,11 +117,12 @@ func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.Vol
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
 		Do(ctx).
 		Into(result)
@@ -129,13 +131,13 @@ func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.Vol
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
 		Do(ctx).
 		Into(result)
@@ -168,12 +170,13 @@ func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *metav
 }
 
 // Patch applies the patch and returns the patched volumeAttachment.
-func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Patch(pt).
 		Resource("volumeattachments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
 		Do(ctx).
 		Into(result)
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
index 8f00a8a81f2..96cbf0259fd 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
@@ -77,7 +77,7 @@ func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions)
 }
 
 // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) {
 	obj, err := c.Fake.
 		Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{})
 	if obj == nil {
@@ -87,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1 // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v } // Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index d7a0e8051fb..4ecc7eb21c4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -38,15 +38,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
type VolumeAttachmentInterface interface { - Create(context.Context, *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Update(context.Context, *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - UpdateStatus(context.Context, *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -105,10 +105,11 @@ func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (wat } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alph } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alph // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *v1.De } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 1ec1e142eb5..0420bf9b26e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -38,14 +38,14 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. type CSIDriverInterface interface { - Create(context.Context, *v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Update(context.Context, *v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) + Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) CSIDriverExpansion } @@ -104,10 +104,11 @@ func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inte } // Create takes the representation of a cSIDriver and creates it. 
Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Post(). Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSIDriver). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver) ( } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Put(). Resource("csidrivers"). Name(cSIDriver.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSIDriver). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *cSIDrivers) DeleteCollection(ctx context.Context, options *v1.DeleteOpt } // Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Patch(pt). Resource("csidrivers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index a2b5e4167ed..7f737017535 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -38,14 +38,14 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. 
type CSINodeInterface interface { - Create(context.Context, *v1beta1.CSINode) (*v1beta1.CSINode, error) - Update(context.Context, *v1beta1.CSINode) (*v1beta1.CSINode, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.CSINode, error) + Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error) + Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) CSINodeExpansion } @@ -104,10 +104,11 @@ func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Post(). Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode) (result } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Put(). Resource("csinodes"). Name(cSINode.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *cSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteOptio } // Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Patch(pt). Resource("csinodes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). 
Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index d1d8a798119..7fe60c7304c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -77,7 +77,7 @@ func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDrive } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index 3463d86a0f7..af22e22f9d5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -77,7 +77,7 @@ func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode) (re } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 80b10bc7e78..8d53f8240dc 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -77,7 +77,7 @@ func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.S } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, options *v1.D } // Patch applies the patch and returns the patched storageClass. 
-func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index d7f6ad8af05..30061b0619b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -77,7 +77,7 @@ func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1 } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1 // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v } // Patch applies the patch and returns the patched volumeAttachment. 
-func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 8c713d3115e..553d40f6840 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -38,14 +38,14 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. type StorageClassInterface interface { - Create(context.Context, *v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Update(context.Context, *v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.StorageClass, error) + Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error) + Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) StorageClassExpansion } @@ -104,10 +104,11 @@ func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Post(). Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). Do(ctx). Into(result) @@ -115,11 +116,12 @@ func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.Stora } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. 
-func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Put(). Resource("storageclasses"). Name(storageClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). Do(ctx). Into(result) @@ -152,12 +154,13 @@ func (c *storageClasses) DeleteCollection(ctx context.Context, options *v1.Delet } // Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Patch(pt). Resource("storageclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 31d481f5a68..f9eb953845c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -38,15 +38,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
type VolumeAttachmentInterface interface { - Create(context.Context, *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Update(context.Context, *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - UpdateStatus(context.Context, *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -105,10 +105,11 @@ func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (wat } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *v1.De } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/clustertesttype.go index 3aebe542dc8..defcd8d37ca 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/clustertesttype.go @@ -39,17 +39,17 @@ type ClusterTestTypesGetter interface { // ClusterTestTypeInterface has methods to work with ClusterTestType resources. 
type ClusterTestTypeInterface interface { - Create(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Update(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - UpdateStatus(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ClusterTestType, error) + Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (*v1.ClusterTestType, error) + Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterTestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterTestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) GetScale(ctx context.Context, clusterTestTypeName string, options metav1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) + UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) ClusterTestTypeExpansion } @@ -109,10 +109,11 @@ func (c *clusterTestTypes) Watch(ctx context.Context, opts metav1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Post(). Resource("clustertesttypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -120,11 +121,12 @@ func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.Clust } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). + VersionedParams(&opts, scheme.ParameterCodec). 
Body(clusterTestType). Do(ctx). Into(result) @@ -133,13 +135,13 @@ func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.Clust // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -172,12 +174,13 @@ func (c *clusterTestTypes) DeleteCollection(ctx context.Context, options *metav1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Patch(pt). Resource("clustertesttypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -198,12 +201,13 @@ func (c *clusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName str } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (result *autoscaling.Scale, err error) { result = &autoscaling.Scale{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestTypeName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go index 0d4cf547d86..f703e11cb27 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -78,7 +78,7 @@ func (c *FakeClusterTestTypes) Watch(ctx context.Context, opts v1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. 
-func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.CreateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -88,7 +88,7 @@ func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *exam } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -99,7 +99,7 @@ func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *exam // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (*examplev1.ClusterTestType, error) { +func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (*examplev1.ClusterTestType, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "status", clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -124,7 +124,7 @@ func (c *FakeClusterTestTypes) DeleteCollection(ctx context.Context, options *v1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) if obj == nil { @@ -144,7 +144,7 @@ func (c *FakeClusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts v1.UpdateOptions) (result *autoscaling.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "scale", scale), &autoscaling.Scale{}) if obj == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_testtype.go index ea8e388aa00..b669a795f53 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType, opts v1.CreateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType) (*examplev1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (*examplev1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/testtype.go index c2c04954de7..69a7bc1868d 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/clientset/versioned/typed/example/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go index c19df6695e5..f8ca73b0a20 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go @@ -39,17 +39,17 @@ type ClusterTestTypesGetter interface { // ClusterTestTypeInterface has methods to work with ClusterTestType resources. 
type ClusterTestTypeInterface interface { - Create(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Update(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - UpdateStatus(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ClusterTestType, error) + Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (*v1.ClusterTestType, error) + Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterTestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterTestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) GetScale(ctx context.Context, clusterTestTypeName string, options metav1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) + UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) ClusterTestTypeExpansion } @@ -109,10 +109,11 @@ func (c *clusterTestTypes) Watch(ctx context.Context, opts metav1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Post(). Resource("clustertesttypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -120,11 +121,12 @@ func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.Clust } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). + VersionedParams(&opts, scheme.ParameterCodec). 
Body(clusterTestType). Do(ctx). Into(result) @@ -133,13 +135,13 @@ func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.Clust // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -172,12 +174,13 @@ func (c *clusterTestTypes) DeleteCollection(ctx context.Context, options *metav1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Patch(pt). Resource("clustertesttypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -198,12 +201,13 @@ func (c *clusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName str } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (result *autoscaling.Scale, err error) { result = &autoscaling.Scale{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestTypeName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go index d3816474a6a..d10eafe60f3 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -78,7 +78,7 @@ func (c *FakeClusterTestTypes) Watch(ctx context.Context, opts v1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. 
-func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.CreateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -88,7 +88,7 @@ func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *exam } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -99,7 +99,7 @@ func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *exam // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (*examplev1.ClusterTestType, error) { +func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (*examplev1.ClusterTestType, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "status", clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -124,7 +124,7 @@ func (c *FakeClusterTestTypes) DeleteCollection(ctx context.Context, options *v1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) if obj == nil { @@ -144,7 +144,7 @@ func (c *FakeClusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts v1.UpdateOptions) (result *autoscaling.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "scale", scale), &autoscaling.Scale{}) if obj == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go index df5045fa10c..6346ee204e1 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType, opts v1.CreateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType) (*examplev1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (*examplev1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go index 81be57c0449..530a45e974f 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go index 2a8f095ea13..86c652af574 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *example.TestType) (result *example.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example.TestType, opts v1.CreateOptions) (result *example.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example.TestType) } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example.TestType) (result *example.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (result *example.TestType, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example.TestType) // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example.TestType) (*example.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (*example.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go index 424fc4656a5..3c4bf279eb9 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. 
type TestTypeInterface interface { - Create(context.Context, *example.TestType) (*example.TestType, error) - Update(context.Context, *example.TestType) (*example.TestType, error) - UpdateStatus(context.Context, *example.TestType) (*example.TestType, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*example.TestType, error) + Create(ctx context.Context, testType *example.TestType, opts v1.CreateOptions) (*example.TestType, error) + Update(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (*example.TestType, error) + UpdateStatus(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (*example.TestType, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*example.TestType, error) List(ctx context.Context, opts v1.ListOptions) (*example.TestTypeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *example.TestType) (result *example.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *example.TestType, opts v1.CreateOptions) (result *example.TestType, err error) { result = &example.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *example.TestType) (res } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *example.TestType) (result *example.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (result *example.TestType, err error) { result = &example.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *example.TestType) (res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *testTypes) UpdateStatus(ctx context.Context, testType *example.TestType) (result *example.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *example.TestType, opts v1.UpdateOptions) (result *example.TestType, err error) { result = &example.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example.TestType, err error) { result = &example.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go index 566a831e1e3..7595b6478a9 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go @@ -77,7 +77,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *example2.TestType) (result *example2.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example2.TestType, opts v1.CreateOptions) (result *example2.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(testtypesResource, testType), &example2.TestType{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example2.TestType) } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example2.TestType) (result *example2.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (result *example2.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(testtypesResource, testType), &example2.TestType{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example2.TestType) // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2.TestType) (*example2.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (*example2.TestType, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(testtypesResource, "status", testType), &example2.TestType{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example2.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example2.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(testtypesResource, name, pt, data, subresources...), &example2.TestType{}) if obj == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go index e9f8c444417..f29f593db2c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *example2.TestType) (*example2.TestType, error) - Update(context.Context, *example2.TestType) (*example2.TestType, error) - UpdateStatus(context.Context, *example2.TestType) (*example2.TestType, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*example2.TestType, error) + Create(ctx context.Context, testType *example2.TestType, opts v1.CreateOptions) (*example2.TestType, error) + Update(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (*example2.TestType, error) + UpdateStatus(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (*example2.TestType, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*example2.TestType, error) List(ctx context.Context, opts v1.ListOptions) (*example2.TestTypeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example2.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example2.TestType, err error) TestTypeExpansion } @@ -105,10 +105,11 @@ func (c *testTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a testType and creates it. 
Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *example2.TestType) (result *example2.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *example2.TestType, opts v1.CreateOptions) (result *example2.TestType, err error) { result = &example2.TestType{} err = c.client.Post(). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *testTypes) Create(ctx context.Context, testType *example2.TestType) (re } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *example2.TestType) (result *example2.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (result *example2.TestType, err error) { result = &example2.TestType{} err = c.client.Put(). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *testTypes) Update(ctx context.Context, testType *example2.TestType) (re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *example2.TestType) (result *example2.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *example2.TestType, opts v1.UpdateOptions) (result *example2.TestType, err error) { result = &example2.TestType{} err = c.client.Put(). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example2.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example2.TestType, err error) { result = &example2.TestType{} err = c.client.Patch(pt). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go index 50725f7ec52..a26b42d1e53 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. 
-func (c *FakeTestTypes) Create(ctx context.Context, testType *example3io.TestType) (result *example3io.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example3io.TestType, opts v1.CreateOptions) (result *example3io.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example3io.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example3io.TestTyp } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example3io.TestType) (result *example3io.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (result *example3io.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example3io.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example3io.TestTyp // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example3io.TestType) (*example3io.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (*example3io.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example3io.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example3io.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example3io.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go index eb886168054..d2e51d5e2cd 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. 
type TestTypeInterface interface { - Create(context.Context, *example3io.TestType) (*example3io.TestType, error) - Update(context.Context, *example3io.TestType) (*example3io.TestType, error) - UpdateStatus(context.Context, *example3io.TestType) (*example3io.TestType, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*example3io.TestType, error) + Create(ctx context.Context, testType *example3io.TestType, opts v1.CreateOptions) (*example3io.TestType, error) + Update(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (*example3io.TestType, error) + UpdateStatus(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (*example3io.TestType, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*example3io.TestType, error) List(ctx context.Context, opts v1.ListOptions) (*example3io.TestTypeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example3io.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *example3io.TestType) (result *example3io.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *example3io.TestType, opts v1.CreateOptions) (result *example3io.TestType, err error) { result = &example3io.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *example3io.TestType) ( } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *example3io.TestType) (result *example3io.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (result *example3io.TestType, err error) { result = &example3io.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *example3io.TestType) ( // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *testTypes) UpdateStatus(ctx context.Context, testType *example3io.TestType) (result *example3io.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *example3io.TestType, opts v1.UpdateOptions) (result *example3io.TestType, err error) { result = &example3io.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *v1.DeleteOpti } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example3io.TestType, err error) { result = &example3io.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go index 5f8cc72c780..d3ef16e5a90 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType, opts v1.CreateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType) (*examplev1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (*examplev1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go index 670432b5514..2d42149182f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. 
-func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 95d8f5a6aad..7d64e588697 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. 
-func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestType) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestType, opts v1.CreateOptions) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example2v1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestTyp } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestType) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestType, opts v1.UpdateOptions) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example2v1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestTyp // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2v1.TestType) (*example2v1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2v1.TestType, opts v1.UpdateOptions) (*example2v1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example2v1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go index 99722d307f4..ea5650dbbe1 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. 
type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go index 6b02fabf592..18689dbd2b2 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *example3iov1.TestType) (result *example3iov1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example3iov1.TestType, opts v1.CreateOptions) (result *example3iov1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example3iov1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example3iov1.TestT } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example3iov1.TestType) (result *example3iov1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example3iov1.TestType, opts v1.UpdateOptions) (result *example3iov1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example3iov1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example3iov1.TestT // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example3iov1.TestType) (*example3iov1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example3iov1.TestType, opts v1.UpdateOptions) (*example3iov1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example3iov1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. 
-func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example3iov1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example3iov1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example3iov1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go index 794825c35f9..9ac449a514b 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. 
Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go index 43ef4755e39..1fe6bfd99c4 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go @@ -39,17 +39,17 @@ type ClusterTestTypesGetter interface { // ClusterTestTypeInterface has methods to work with ClusterTestType resources. 
type ClusterTestTypeInterface interface { - Create(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Update(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - UpdateStatus(context.Context, *v1.ClusterTestType) (*v1.ClusterTestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ClusterTestType, error) + Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (*v1.ClusterTestType, error) + Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (*v1.ClusterTestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterTestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterTestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) GetScale(ctx context.Context, clusterTestTypeName string, options metav1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) + UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) ClusterTestTypeExpansion } @@ -109,10 +109,11 @@ func (c *clusterTestTypes) Watch(ctx context.Context, opts metav1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.CreateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Post(). Resource("clustertesttypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -120,11 +121,12 @@ func (c *clusterTestTypes) Create(ctx context.Context, clusterTestType *v1.Clust } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). + VersionedParams(&opts, scheme.ParameterCodec). 
Body(clusterTestType). Do(ctx). Into(result) @@ -133,13 +135,13 @@ func (c *clusterTestTypes) Update(ctx context.Context, clusterTestType *v1.Clust // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *v1.ClusterTestType, opts metav1.UpdateOptions) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterTestType). Do(ctx). Into(result) @@ -172,12 +174,13 @@ func (c *clusterTestTypes) DeleteCollection(ctx context.Context, options *metav1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) { +func (c *clusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterTestType, err error) { result = &v1.ClusterTestType{} err = c.client.Patch(pt). Resource("clustertesttypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) @@ -198,12 +201,13 @@ func (c *clusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName str } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *clusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts metav1.UpdateOptions) (result *autoscaling.Scale, err error) { result = &autoscaling.Scale{} err = c.client.Put(). Resource("clustertesttypes"). Name(clusterTestTypeName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go index d15c5c1654c..5d01606ee2b 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -78,7 +78,7 @@ func (c *FakeClusterTestTypes) Watch(ctx context.Context, opts v1.ListOptions) ( } // Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. 
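For the cluster-scoped example type regenerated above, the scale subresource changes shape as well: UpdateScale now carries metav1.UpdateOptions, which VersionedParams forwards on the PUT to the scale subresource. A caller-side sketch, assuming the generated typed package sits at k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1 (implied by the file path in this diff) and that Scale is autoscaling/v1's type:

package scalingexample

import (
	"context"

	autoscalingv1 "k8s.io/api/autoscaling/v1" // assumed to be the Scale type used by the generated client
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	examplev1client "k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1" // path implied by this diff
)

// ScaleTo resizes a ClusterTestType through its scale subresource, passing the
// UpdateOptions argument that UpdateScale now requires.
func ScaleTo(ctx context.Context, c examplev1client.ClusterTestTypeInterface, name string, replicas int32) (*autoscalingv1.Scale, error) {
	scale, err := c.GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return c.UpdateScale(ctx, name, scale, metav1.UpdateOptions{FieldManager: "example-scaler"}) // FieldManager is illustrative
}

Returning to the generated diff: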
-func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.CreateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -88,7 +88,7 @@ func (c *FakeClusterTestTypes) Create(ctx context.Context, clusterTestType *exam } // Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -99,7 +99,7 @@ func (c *FakeClusterTestTypes) Update(ctx context.Context, clusterTestType *exam // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType) (*examplev1.ClusterTestType, error) { +func (c *FakeClusterTestTypes) UpdateStatus(ctx context.Context, clusterTestType *examplev1.ClusterTestType, opts v1.UpdateOptions) (*examplev1.ClusterTestType, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "status", clusterTestType), &examplev1.ClusterTestType{}) if obj == nil { @@ -124,7 +124,7 @@ func (c *FakeClusterTestTypes) DeleteCollection(ctx context.Context, options *v1 } // Patch applies the patch and returns the patched clusterTestType. -func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { +func (c *FakeClusterTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) if obj == nil { @@ -144,7 +144,7 @@ func (c *FakeClusterTestTypes) GetScale(ctx context.Context, clusterTestTypeName } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { +func (c *FakeClusterTestTypes) UpdateScale(ctx context.Context, clusterTestTypeName string, scale *autoscaling.Scale, opts v1.UpdateOptions) (result *autoscaling.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "scale", scale), &autoscaling.Scale{}) if obj == nil { diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go index a3dbc0640b1..dfbc8caf564 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType, opts v1.CreateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *examplev1.TestType } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (result *examplev1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *examplev1.TestType // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType) (*examplev1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *examplev1.TestType, opts v1.UpdateOptions) (*examplev1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go index 3023b2162c6..779dd5767d7 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 096ea06aaf2..ea843c6bfda 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -81,7 +81,7 @@ func (c *FakeTestTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestType) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestType, opts v1.CreateOptions) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example2v1.TestType{}) @@ -92,7 +92,7 @@ func (c *FakeTestTypes) Create(ctx context.Context, testType *example2v1.TestTyp } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestType) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestType, opts v1.UpdateOptions) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example2v1.TestType{}) @@ -104,7 +104,7 @@ func (c *FakeTestTypes) Update(ctx context.Context, testType *example2v1.TestTyp // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2v1.TestType) (*example2v1.TestType, error) { +func (c *FakeTestTypes) UpdateStatus(ctx context.Context, testType *example2v1.TestType, opts v1.UpdateOptions) (*example2v1.TestType, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example2v1.TestType{}) @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(ctx context.Context, options *v1.Delete } // Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { +func (c *FakeTestTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go index 1327f720e1f..3f5af5f97ce 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go @@ -38,15 +38,15 @@ type TestTypesGetter interface { // TestTypeInterface has methods to work with TestType resources. type TestTypeInterface interface { - Create(context.Context, *v1.TestType) (*v1.TestType, error) - Update(context.Context, *v1.TestType) (*v1.TestType, error) - UpdateStatus(context.Context, *v1.TestType) (*v1.TestType, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.TestType, error) + Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (*v1.TestType, error) + Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (*v1.TestType, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TestType, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.TestTypeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) TestTypeExpansion } @@ -110,11 +110,12 @@ func (c *testTypes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I } // Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. 
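Caller-side, the namespaced TestType clients regenerated in these hunks follow the same pattern. A sketch assuming the example API types live at k8s.io/code-generator/_examples/crd/apis/example/v1 (the clientset paths in this diff imply an apis tree, but it is not shown here):

package testtypeexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	examplev1 "k8s.io/code-generator/_examples/crd/apis/example/v1"                            // assumed path, not shown in this diff
	examplev1client "k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1" // path implied by this diff
)

// CreateAndUpdateStatus creates a TestType and then writes its status through
// the status subresource, passing the new CreateOptions/UpdateOptions arguments.
func CreateAndUpdateStatus(ctx context.Context, c examplev1client.TestTypeInterface, tt *examplev1.TestType) (*examplev1.TestType, error) {
	created, err := c.Create(ctx, tt, metav1.CreateOptions{FieldManager: "example-controller"}) // FieldManager is illustrative
	if err != nil {
		return nil, err
	}
	// ...mutate created.Status here before persisting it...
	return c.UpdateStatus(ctx, created, metav1.UpdateOptions{FieldManager: "example-controller"})
}

Returning to the generated diff: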
-func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Create(ctx context.Context, testType *v1.TestType, opts metav1.CreateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Post(). Namespace(c.ns). Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -122,12 +123,13 @@ func (c *testTypes) Create(ctx context.Context, testType *v1.TestType) (result * } // Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) Update(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -136,14 +138,14 @@ func (c *testTypes) Update(ctx context.Context, testType *v1.TestType) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType) (result *v1.TestType, err error) { +func (c *testTypes) UpdateStatus(ctx context.Context, testType *v1.TestType, opts metav1.UpdateOptions) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Put(). Namespace(c.ns). Resource("testtypes"). Name(testType.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(testType). Do(ctx). Into(result) @@ -178,13 +180,14 @@ func (c *testTypes) DeleteCollection(ctx context.Context, options *metav1.Delete } // Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { +func (c *testTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TestType, err error) { result = &v1.TestType{} err = c.client.Patch(pt). Namespace(c.ns). Resource("testtypes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go index 24867b37456..dfaaee3ed0e 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go @@ -38,15 +38,15 @@ type APIServicesGetter interface { // APIServiceInterface has methods to work with APIService resources. 
type APIServiceInterface interface { - Create(context.Context, *v1.APIService) (*v1.APIService, error) - Update(context.Context, *v1.APIService) (*v1.APIService, error) - UpdateStatus(context.Context, *v1.APIService) (*v1.APIService, error) - Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.APIService, error) + Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (*v1.APIService, error) + Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.APIService, error) List(ctx context.Context, opts metav1.ListOptions) (*v1.APIServiceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.APIService, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) APIServiceExpansion } @@ -105,10 +105,11 @@ func (c *aPIServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch } // Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService) (result *v1.APIService, err error) { +func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (result *v1.APIService, err error) { result = &v1.APIService{} err = c.client.Post(). Resource("apiservices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService) (re } // Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService) (result *v1.APIService, err error) { +func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { result = &v1.APIService{} err = c.client.Put(). Resource("apiservices"). Name(aPIService.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService) (re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService) (result *v1.APIService, err error) { +func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { result = &v1.APIService{} err = c.client.Put(). 
Resource("apiservices"). Name(aPIService.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *aPIServices) DeleteCollection(ctx context.Context, options *metav1.Dele } // Patch applies the patch and returns the patched aPIService. -func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.APIService, err error) { +func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) { result = &v1.APIService{} err = c.client.Patch(pt). Resource("apiservices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go index 8c80481809e..4d5001c6ccc 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go @@ -77,7 +77,7 @@ func (c *FakeAPIServices) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *FakeAPIServices) Create(ctx context.Context, aPIService *apiregistrationv1.APIService) (result *apiregistrationv1.APIService, err error) { +func (c *FakeAPIServices) Create(ctx context.Context, aPIService *apiregistrationv1.APIService, opts v1.CreateOptions) (result *apiregistrationv1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(apiservicesResource, aPIService), &apiregistrationv1.APIService{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeAPIServices) Create(ctx context.Context, aPIService *apiregistratio } // Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *FakeAPIServices) Update(ctx context.Context, aPIService *apiregistrationv1.APIService) (result *apiregistrationv1.APIService, err error) { +func (c *FakeAPIServices) Update(ctx context.Context, aPIService *apiregistrationv1.APIService, opts v1.UpdateOptions) (result *apiregistrationv1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(apiservicesResource, aPIService), &apiregistrationv1.APIService{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeAPIServices) Update(ctx context.Context, aPIService *apiregistratio // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) { +func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *apiregistrationv1.APIService, opts v1.UpdateOptions) (*apiregistrationv1.APIService, error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateSubresourceAction(apiservicesResource, "status", aPIService), &apiregistrationv1.APIService{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeAPIServices) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched aPIService. -func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *apiregistrationv1.APIService, err error) { +func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiregistrationv1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(apiservicesResource, name, pt, data, subresources...), &apiregistrationv1.APIService{}) if obj == nil { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go index a4c2967ae81..b39d9b2dc01 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go @@ -38,15 +38,15 @@ type APIServicesGetter interface { // APIServiceInterface has methods to work with APIService resources. type APIServiceInterface interface { - Create(context.Context, *v1beta1.APIService) (*v1beta1.APIService, error) - Update(context.Context, *v1beta1.APIService) (*v1beta1.APIService, error) - UpdateStatus(context.Context, *v1beta1.APIService) (*v1beta1.APIService, error) - Delete(ctx context.Context, name string, options *v1.DeleteOptions) error - DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.APIService, error) + Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (*v1beta1.APIService, error) + Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) + UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.APIService, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.APIServiceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.APIService, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.APIService, err error) APIServiceExpansion } @@ -105,10 +105,11 @@ func (c *aPIServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. 
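The aggregator's APIService clients (v1 above, v1beta1 below) change identically. A caller-side sketch against the v1 typed interface, showing that Patch now takes PatchOptions ahead of the variadic subresources:

package apiserviceexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	aggregatorv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" // path shown in this diff
)

// EnsureManagedLabel adds a label with a merge patch. Previously the call was
// Patch(ctx, name, pt, data, subresources...); the options argument is new.
func EnsureManagedLabel(ctx context.Context, c aggregatorv1client.APIServiceInterface, name string) (*apiregistrationv1.APIService, error) {
	patch := []byte(`{"metadata":{"labels":{"example.io/managed":"true"}}}`)
	return c.Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{FieldManager: "aggregator-janitor"}) // FieldManager is illustrative
}

Returning to the generated diff: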
-func (c *aPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService) (result *v1beta1.APIService, err error) { +func (c *aPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (result *v1beta1.APIService, err error) { result = &v1beta1.APIService{} err = c.client.Post(). Resource("apiservices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -116,11 +117,12 @@ func (c *aPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService } // Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService) (result *v1beta1.APIService, err error) { +func (c *aPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { result = &v1beta1.APIService{} err = c.client.Put(). Resource("apiservices"). Name(aPIService.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -129,13 +131,13 @@ func (c *aPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService) (result *v1beta1.APIService, err error) { +func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { result = &v1beta1.APIService{} err = c.client.Put(). Resource("apiservices"). Name(aPIService.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(aPIService). Do(ctx). Into(result) @@ -168,12 +170,13 @@ func (c *aPIServices) DeleteCollection(ctx context.Context, options *v1.DeleteOp } // Patch applies the patch and returns the patched aPIService. -func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.APIService, err error) { +func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.APIService, err error) { result = &v1beta1.APIService{} err = c.client.Patch(pt). Resource("apiservices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). Do(ctx). Into(result) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go index 8db0c1aa1d6..36b59e11aa7 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go @@ -77,7 +77,7 @@ func (c *FakeAPIServices) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. 
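The fake clients pick up the same signatures, so existing unit tests only need the extra options argument; as the hunks above show, the fakes still construct their testing actions without the options, so the options are accepted but not recorded on the action. A test sketch against the aggregator fake, assuming the usual generated fake package under clientset_generated/clientset/fake:

package apiserviceexample_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" // assumed standard generated fake package
)

func TestCreateAPIService(t *testing.T) {
	client := fake.NewSimpleClientset()

	svc := &apiregistrationv1.APIService{
		ObjectMeta: metav1.ObjectMeta{Name: "v1beta1.wardle.example.com"},
		Spec: apiregistrationv1.APIServiceSpec{
			Group:                "wardle.example.com",
			Version:              "v1beta1",
			GroupPriorityMinimum: 1000,
			VersionPriority:      15,
		},
	}

	// The CreateOptions argument is new; the fake currently ignores it when
	// recording the create action.
	created, err := client.ApiregistrationV1().APIServices().Create(context.TODO(), svc, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if created.Name != svc.Name {
		t.Fatalf("created %q, want %q", created.Name, svc.Name)
	}
}

Returning to the generated diff: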
-func (c *FakeAPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService) (result *v1beta1.APIService, err error) { +func (c *FakeAPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (result *v1beta1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(apiservicesResource, aPIService), &v1beta1.APIService{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeAPIServices) Create(ctx context.Context, aPIService *v1beta1.APISer } // Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *FakeAPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService) (result *v1beta1.APIService, err error) { +func (c *FakeAPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(apiservicesResource, aPIService), &v1beta1.APIService{}) if obj == nil { @@ -98,7 +98,7 @@ func (c *FakeAPIServices) Update(ctx context.Context, aPIService *v1beta1.APISer // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService) (*v1beta1.APIService, error) { +func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(apiservicesResource, "status", aPIService), &v1beta1.APIService{}) if obj == nil { @@ -123,7 +123,7 @@ func (c *FakeAPIServices) DeleteCollection(ctx context.Context, options *v1.Dele } // Patch applies the patch and returns the patched aPIService. -func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.APIService, err error) { +func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.APIService, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(apiservicesResource, name, pt, data, subresources...), &v1beta1.APIService{}) if obj == nil { diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go index ecb5fad2617..d79163ddb81 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go @@ -37,7 +37,7 @@ type NodeMetricsesGetter interface { // NodeMetricsInterface has methods to work with NodeMetrics resources. 
type NodeMetricsInterface interface { - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.NodeMetrics, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeMetrics, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeMetricsList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) NodeMetricsExpansion diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go index f957dea5c8c..49d57c8e887 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go @@ -37,7 +37,7 @@ type PodMetricsesGetter interface { // PodMetricsInterface has methods to work with PodMetrics resources. type PodMetricsInterface interface { - Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.PodMetrics, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodMetrics, error) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodMetricsList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) PodMetricsExpansion diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go index 78b2b0e40a6..a312221ed25 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go @@ -37,7 +37,7 @@ type NodeMetricsesGetter interface { // NodeMetricsInterface has methods to work with NodeMetrics resources. type NodeMetricsInterface interface { - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.NodeMetrics, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NodeMetrics, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NodeMetricsList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) NodeMetricsExpansion diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go index dd5a03d1fc7..e66c377c25b 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go @@ -37,7 +37,7 @@ type PodMetricsesGetter interface { // PodMetricsInterface has methods to work with PodMetrics resources. 
type PodMetricsInterface interface { - Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.PodMetrics, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodMetrics, error) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) PodMetricsExpansion diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go index 93165d11b1a..9e2d2ad36ca 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go @@ -77,7 +77,7 @@ func (c *FakeFischers) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a fischer and creates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *FakeFischers) Create(ctx context.Context, fischer *v1alpha1.Fischer) (result *v1alpha1.Fischer, err error) { +func (c *FakeFischers) Create(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.CreateOptions) (result *v1alpha1.Fischer, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(fischersResource, fischer), &v1alpha1.Fischer{}) if obj == nil { @@ -87,7 +87,7 @@ func (c *FakeFischers) Create(ctx context.Context, fischer *v1alpha1.Fischer) (r } // Update takes the representation of a fischer and updates it. Returns the server's representation of the fischer, and an error, if there is any. -func (c *FakeFischers) Update(ctx context.Context, fischer *v1alpha1.Fischer) (result *v1alpha1.Fischer, err error) { +func (c *FakeFischers) Update(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.UpdateOptions) (result *v1alpha1.Fischer, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(fischersResource, fischer), &v1alpha1.Fischer{}) if obj == nil { @@ -112,7 +112,7 @@ func (c *FakeFischers) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched fischer. -func (c *FakeFischers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Fischer, err error) { +func (c *FakeFischers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Fischer, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(fischersResource, name, pt, data, subresources...), &v1alpha1.Fischer{}) if obj == nil { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go index 0b13958f69b..d1b76d43716 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go @@ -81,7 +81,7 @@ func (c *FakeFlunders) Watch(ctx context.Context, opts v1.ListOptions) (watch.In } // Create takes the representation of a flunder and creates it. 
Returns the server's representation of the flunder, and an error, if there is any. -func (c *FakeFlunders) Create(ctx context.Context, flunder *v1alpha1.Flunder) (result *v1alpha1.Flunder, err error) { +func (c *FakeFlunders) Create(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.CreateOptions) (result *v1alpha1.Flunder, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(flundersResource, c.ns, flunder), &v1alpha1.Flunder{}) @@ -92,7 +92,7 @@ func (c *FakeFlunders) Create(ctx context.Context, flunder *v1alpha1.Flunder) (r } // Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any. -func (c *FakeFlunders) Update(ctx context.Context, flunder *v1alpha1.Flunder) (result *v1alpha1.Flunder, err error) { +func (c *FakeFlunders) Update(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (result *v1alpha1.Flunder, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(flundersResource, c.ns, flunder), &v1alpha1.Flunder{}) @@ -104,7 +104,7 @@ func (c *FakeFlunders) Update(ctx context.Context, flunder *v1alpha1.Flunder) (r // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlunders) UpdateStatus(ctx context.Context, flunder *v1alpha1.Flunder) (*v1alpha1.Flunder, error) { +func (c *FakeFlunders) UpdateStatus(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (*v1alpha1.Flunder, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(flundersResource, "status", c.ns, flunder), &v1alpha1.Flunder{}) @@ -131,7 +131,7 @@ func (c *FakeFlunders) DeleteCollection(ctx context.Context, options *v1.DeleteO } // Patch applies the patch and returns the patched flunder. -func (c *FakeFlunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Flunder, err error) { +func (c *FakeFlunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Flunder, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(flundersResource, c.ns, name, pt, data, subresources...), &v1alpha1.Flunder{}) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fischer.go index e99f1b03187..51ab0e9c152 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/fischer.go @@ -38,14 +38,14 @@ type FischersGetter interface { // FischerInterface has methods to work with Fischer resources. 
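One practical effect of threading CreateOptions through the sample-apiserver clients regenerated in these hunks: callers can request a server-side dry run straight from the typed client. A sketch, assuming the wardle API types at k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 and the typed package path shown in this diff:

package wardleexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	wardlev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"                              // assumed path, not shown in this diff
	wardleclient "k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1" // path shown in this diff
)

// DryRunCreateFlunder asks the server to validate and admit a Flunder create
// without persisting it, via the new CreateOptions argument (sent as
// ?dryRun=All by VersionedParams).
func DryRunCreateFlunder(ctx context.Context, c wardleclient.FlunderInterface, name string) (*wardlev1alpha1.Flunder, error) {
	flunder := &wardlev1alpha1.Flunder{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
	return c.Create(ctx, flunder, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
}

Returning to the generated diff: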
 type FischerInterface interface {
-    Create(context.Context, *v1alpha1.Fischer) (*v1alpha1.Fischer, error)
-    Update(context.Context, *v1alpha1.Fischer) (*v1alpha1.Fischer, error)
-    Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-    DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-    Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.Fischer, error)
+    Create(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.CreateOptions) (*v1alpha1.Fischer, error)
+    Update(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.UpdateOptions) (*v1alpha1.Fischer, error)
+    Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+    DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+    Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Fischer, error)
     List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FischerList, error)
     Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Fischer, err error)
+    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Fischer, err error)
     FischerExpansion
 }
@@ -104,10 +104,11 @@ func (c *fischers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf
 }
 // Create takes the representation of a fischer and creates it. Returns the server's representation of the fischer, and an error, if there is any.
-func (c *fischers) Create(ctx context.Context, fischer *v1alpha1.Fischer) (result *v1alpha1.Fischer, err error) {
+func (c *fischers) Create(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.CreateOptions) (result *v1alpha1.Fischer, err error) {
     result = &v1alpha1.Fischer{}
     err = c.client.Post().
         Resource("fischers").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(fischer).
         Do(ctx).
         Into(result)
@@ -115,11 +116,12 @@ func (c *fischers) Create(ctx context.Context, fischer *v1alpha1.Fischer) (resul
 }
 // Update takes the representation of a fischer and updates it. Returns the server's representation of the fischer, and an error, if there is any.
-func (c *fischers) Update(ctx context.Context, fischer *v1alpha1.Fischer) (result *v1alpha1.Fischer, err error) {
+func (c *fischers) Update(ctx context.Context, fischer *v1alpha1.Fischer, opts v1.UpdateOptions) (result *v1alpha1.Fischer, err error) {
     result = &v1alpha1.Fischer{}
     err = c.client.Put().
         Resource("fischers").
         Name(fischer.Name).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(fischer).
         Do(ctx).
         Into(result)
@@ -152,12 +154,13 @@ func (c *fischers) DeleteCollection(ctx context.Context, options *v1.DeleteOptio
 }
 // Patch applies the patch and returns the patched fischer.
-func (c *fischers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Fischer, err error) {
+func (c *fischers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Fischer, err error) {
     result = &v1alpha1.Fischer{}
     err = c.client.Patch(pt).
         Resource("fischers").
-        SubResource(subresources...).
         Name(name).
+        SubResource(subresources...).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(data).
         Do(ctx).
         Into(result)
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/flunder.go
index 664eda2f165..ae8dffeb62c 100644
--- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/flunder.go
+++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1/flunder.go
@@ -38,15 +38,15 @@ type FlundersGetter interface {
 // FlunderInterface has methods to work with Flunder resources.
 type FlunderInterface interface {
-    Create(context.Context, *v1alpha1.Flunder) (*v1alpha1.Flunder, error)
-    Update(context.Context, *v1alpha1.Flunder) (*v1alpha1.Flunder, error)
-    UpdateStatus(context.Context, *v1alpha1.Flunder) (*v1alpha1.Flunder, error)
-    Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-    DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-    Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.Flunder, error)
+    Create(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.CreateOptions) (*v1alpha1.Flunder, error)
+    Update(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (*v1alpha1.Flunder, error)
+    UpdateStatus(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (*v1alpha1.Flunder, error)
+    Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+    DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+    Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Flunder, error)
     List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlunderList, error)
     Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Flunder, err error)
+    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Flunder, err error)
     FlunderExpansion
 }
@@ -110,11 +110,12 @@ func (c *flunders) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf
 }
 // Create takes the representation of a flunder and creates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *flunders) Create(ctx context.Context, flunder *v1alpha1.Flunder) (result *v1alpha1.Flunder, err error) {
+func (c *flunders) Create(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.CreateOptions) (result *v1alpha1.Flunder, err error) {
     result = &v1alpha1.Flunder{}
     err = c.client.Post().
         Namespace(c.ns).
         Resource("flunders").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -122,12 +123,13 @@ func (c *flunders) Create(ctx context.Context, flunder *v1alpha1.Flunder) (resul
 }
 // Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *flunders) Update(ctx context.Context, flunder *v1alpha1.Flunder) (result *v1alpha1.Flunder, err error) {
+func (c *flunders) Update(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (result *v1alpha1.Flunder, err error) {
     result = &v1alpha1.Flunder{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("flunders").
         Name(flunder.Name).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -136,14 +138,14 @@ func (c *flunders) Update(ctx context.Context, flunder *v1alpha1.Flunder) (resul
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *flunders) UpdateStatus(ctx context.Context, flunder *v1alpha1.Flunder) (result *v1alpha1.Flunder, err error) {
+func (c *flunders) UpdateStatus(ctx context.Context, flunder *v1alpha1.Flunder, opts v1.UpdateOptions) (result *v1alpha1.Flunder, err error) {
     result = &v1alpha1.Flunder{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("flunders").
         Name(flunder.Name).
         SubResource("status").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -178,13 +180,14 @@ func (c *flunders) DeleteCollection(ctx context.Context, options *v1.DeleteOptio
 }
 // Patch applies the patch and returns the patched flunder.
-func (c *flunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Flunder, err error) {
+func (c *flunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Flunder, err error) {
     result = &v1alpha1.Flunder{}
     err = c.client.Patch(pt).
         Namespace(c.ns).
         Resource("flunders").
-        SubResource(subresources...).
         Name(name).
+        SubResource(subresources...).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(data).
         Do(ctx).
         Into(result)
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/fake/fake_flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/fake/fake_flunder.go
index afc5d4cd2ae..5039134bd8f 100644
--- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/fake/fake_flunder.go
+++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/fake/fake_flunder.go
@@ -81,7 +81,7 @@ func (c *FakeFlunders) Watch(ctx context.Context, opts v1.ListOptions) (watch.In
 }
 // Create takes the representation of a flunder and creates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *FakeFlunders) Create(ctx context.Context, flunder *v1beta1.Flunder) (result *v1beta1.Flunder, err error) {
+func (c *FakeFlunders) Create(ctx context.Context, flunder *v1beta1.Flunder, opts v1.CreateOptions) (result *v1beta1.Flunder, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewCreateAction(flundersResource, c.ns, flunder), &v1beta1.Flunder{})
@@ -92,7 +92,7 @@ func (c *FakeFlunders) Create(ctx context.Context, flunder *v1beta1.Flunder) (re
 }
 // Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *FakeFlunders) Update(ctx context.Context, flunder *v1beta1.Flunder) (result *v1beta1.Flunder, err error) {
+func (c *FakeFlunders) Update(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (result *v1beta1.Flunder, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewUpdateAction(flundersResource, c.ns, flunder), &v1beta1.Flunder{})
@@ -104,7 +104,7 @@ func (c *FakeFlunders) Update(ctx context.Context, flunder *v1beta1.Flunder) (re
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeFlunders) UpdateStatus(ctx context.Context, flunder *v1beta1.Flunder) (*v1beta1.Flunder, error) {
+func (c *FakeFlunders) UpdateStatus(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (*v1beta1.Flunder, error) {
     obj, err := c.Fake.
         Invokes(testing.NewUpdateSubresourceAction(flundersResource, "status", c.ns, flunder), &v1beta1.Flunder{})
@@ -131,7 +131,7 @@ func (c *FakeFlunders) DeleteCollection(ctx context.Context, options *v1.DeleteO
 }
 // Patch applies the patch and returns the patched flunder.
-func (c *FakeFlunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Flunder, err error) {
+func (c *FakeFlunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Flunder, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewPatchSubresourceAction(flundersResource, c.ns, name, pt, data, subresources...), &v1beta1.Flunder{})
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/flunder.go
index 0a8574b25e4..61900687e7d 100644
--- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/flunder.go
+++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1beta1/flunder.go
@@ -38,15 +38,15 @@ type FlundersGetter interface {
 // FlunderInterface has methods to work with Flunder resources.
 type FlunderInterface interface {
-    Create(context.Context, *v1beta1.Flunder) (*v1beta1.Flunder, error)
-    Update(context.Context, *v1beta1.Flunder) (*v1beta1.Flunder, error)
-    UpdateStatus(context.Context, *v1beta1.Flunder) (*v1beta1.Flunder, error)
-    Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-    DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-    Get(ctx context.Context, name string, options v1.GetOptions) (*v1beta1.Flunder, error)
+    Create(ctx context.Context, flunder *v1beta1.Flunder, opts v1.CreateOptions) (*v1beta1.Flunder, error)
+    Update(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (*v1beta1.Flunder, error)
+    UpdateStatus(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (*v1beta1.Flunder, error)
+    Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+    DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+    Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Flunder, error)
     List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlunderList, error)
     Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Flunder, err error)
+    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Flunder, err error)
     FlunderExpansion
 }
@@ -110,11 +110,12 @@ func (c *flunders) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf
 }
 // Create takes the representation of a flunder and creates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *flunders) Create(ctx context.Context, flunder *v1beta1.Flunder) (result *v1beta1.Flunder, err error) {
+func (c *flunders) Create(ctx context.Context, flunder *v1beta1.Flunder, opts v1.CreateOptions) (result *v1beta1.Flunder, err error) {
     result = &v1beta1.Flunder{}
     err = c.client.Post().
         Namespace(c.ns).
         Resource("flunders").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -122,12 +123,13 @@ func (c *flunders) Create(ctx context.Context, flunder *v1beta1.Flunder) (result
 }
 // Update takes the representation of a flunder and updates it. Returns the server's representation of the flunder, and an error, if there is any.
-func (c *flunders) Update(ctx context.Context, flunder *v1beta1.Flunder) (result *v1beta1.Flunder, err error) {
+func (c *flunders) Update(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (result *v1beta1.Flunder, err error) {
     result = &v1beta1.Flunder{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("flunders").
         Name(flunder.Name).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -136,14 +138,14 @@ func (c *flunders) Update(ctx context.Context, flunder *v1beta1.Flunder) (result
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *flunders) UpdateStatus(ctx context.Context, flunder *v1beta1.Flunder) (result *v1beta1.Flunder, err error) {
+func (c *flunders) UpdateStatus(ctx context.Context, flunder *v1beta1.Flunder, opts v1.UpdateOptions) (result *v1beta1.Flunder, err error) {
     result = &v1beta1.Flunder{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("flunders").
         Name(flunder.Name).
         SubResource("status").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(flunder).
         Do(ctx).
         Into(result)
@@ -178,13 +180,14 @@ func (c *flunders) DeleteCollection(ctx context.Context, options *v1.DeleteOptio
 }
 // Patch applies the patch and returns the patched flunder.
-func (c *flunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Flunder, err error) {
+func (c *flunders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Flunder, err error) {
     result = &v1beta1.Flunder{}
     err = c.client.Patch(pt).
         Namespace(c.ns).
         Resource("flunders").
-        SubResource(subresources...).
         Name(name).
+        SubResource(subresources...).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(data).
         Do(ctx).
         Into(result)
diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go
index 353f91e307f..e1434da222c 100644
--- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go
+++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go
@@ -81,7 +81,7 @@ func (c *FakeFoos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interf
 }
 // Create takes the representation of a foo and creates it. Returns the server's representation of the foo, and an error, if there is any.
-func (c *FakeFoos) Create(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
+func (c *FakeFoos) Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (result *v1alpha1.Foo, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewCreateAction(foosResource, c.ns, foo), &v1alpha1.Foo{})
@@ -92,7 +92,7 @@ func (c *FakeFoos) Create(ctx context.Context, foo *v1alpha1.Foo) (result *v1alp
 }
 // Update takes the representation of a foo and updates it. Returns the server's representation of the foo, and an error, if there is any.
-func (c *FakeFoos) Update(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
+func (c *FakeFoos) Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewUpdateAction(foosResource, c.ns, foo), &v1alpha1.Foo{})
@@ -104,7 +104,7 @@ func (c *FakeFoos) Update(ctx context.Context, foo *v1alpha1.Foo) (result *v1alp
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeFoos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo) (*v1alpha1.Foo, error) {
+func (c *FakeFoos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error) {
     obj, err := c.Fake.
         Invokes(testing.NewUpdateSubresourceAction(foosResource, "status", c.ns, foo), &v1alpha1.Foo{})
@@ -131,7 +131,7 @@ func (c *FakeFoos) DeleteCollection(ctx context.Context, options *v1.DeleteOptio
 }
 // Patch applies the patch and returns the patched foo.
-func (c *FakeFoos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
+func (c *FakeFoos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error) {
     obj, err := c.Fake.
         Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, pt, data, subresources...), &v1alpha1.Foo{})
diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go
index 9798124227e..bf0eae3a006 100644
--- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go
+++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go
@@ -38,15 +38,15 @@ type FoosGetter interface {
 // FooInterface has methods to work with Foo resources.
 type FooInterface interface {
-    Create(context.Context, *v1alpha1.Foo) (*v1alpha1.Foo, error)
-    Update(context.Context, *v1alpha1.Foo) (*v1alpha1.Foo, error)
-    UpdateStatus(context.Context, *v1alpha1.Foo) (*v1alpha1.Foo, error)
-    Delete(ctx context.Context, name string, options *v1.DeleteOptions) error
-    DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error
-    Get(ctx context.Context, name string, options v1.GetOptions) (*v1alpha1.Foo, error)
+    Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (*v1alpha1.Foo, error)
+    Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error)
+    UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error)
+    Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error
+    DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error
+    Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Foo, error)
     List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FooList, error)
     Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error)
+    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error)
     FooExpansion
 }
@@ -110,11 +110,12 @@ func (c *foos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface,
 }
 // Create takes the representation of a foo and creates it. Returns the server's representation of the foo, and an error, if there is any.
-func (c *foos) Create(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
+func (c *foos) Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (result *v1alpha1.Foo, err error) {
     result = &v1alpha1.Foo{}
     err = c.client.Post().
         Namespace(c.ns).
         Resource("foos").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(foo).
         Do(ctx).
         Into(result)
@@ -122,12 +123,13 @@ func (c *foos) Create(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.
 }
 // Update takes the representation of a foo and updates it. Returns the server's representation of the foo, and an error, if there is any.
-func (c *foos) Update(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
+func (c *foos) Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
     result = &v1alpha1.Foo{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("foos").
         Name(foo.Name).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(foo).
         Do(ctx).
         Into(result)
@@ -136,14 +138,14 @@ func (c *foos) Update(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *foos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
+func (c *foos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
     result = &v1alpha1.Foo{}
     err = c.client.Put().
         Namespace(c.ns).
         Resource("foos").
         Name(foo.Name).
         SubResource("status").
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(foo).
         Do(ctx).
         Into(result)
@@ -178,13 +180,14 @@ func (c *foos) DeleteCollection(ctx context.Context, options *v1.DeleteOptions,
 }
 // Patch applies the patch and returns the patched foo.
-func (c *foos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
+func (c *foos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error) {
     result = &v1alpha1.Foo{}
     err = c.client.Patch(pt).
         Namespace(c.ns).
         Resource("foos").
-        SubResource(subresources...).
         Name(name).
+        SubResource(subresources...).
+        VersionedParams(&opts, scheme.ParameterCodec).
         Body(data).
         Do(ctx).
         Into(result)

From bfc75d9a5c1673092be1030137319684edd17aa3 Mon Sep 17 00:00:00 2001
From: Mike Danese
Date: Tue, 4 Feb 2020 18:02:08 -0800
Subject: [PATCH 4/4] manual fixes

---
 .../node_lifecycle_controller_test.go       | 11 ++++----
 pkg/controller/replication/conversion.go    | 26 ++++++++++++-------
 pkg/controller/testutil/test_utils.go       | 10 +++----
 .../certificate/bootstrap/bootstrap_test.go |  2 +-
 .../certificate/certificate_manager_test.go |  2 +-
 5 files changed, 28 insertions(+), 23 deletions(-)

diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
index 32eeced551e..0bb57f34e24 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -2759,7 +2759,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     // Make node3 healthy again.
     node2.Status = healthyNodeNewStatus
-    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node2)
+    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node2, metav1.UpdateOptions{})
     if err != nil {
         t.Errorf(err.Error())
         return
@@ -2905,12 +2905,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     node0.Status = newNodeStatus
     node1.Status = healthyNodeNewStatus
-    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node0)
+    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node0, metav1.UpdateOptions{})
     if err != nil {
         t.Errorf(err.Error())
         return
     }
-    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node1)
+    _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node1, metav1.UpdateOptions{})
     if err != nil {
         t.Errorf(err.Error())
         return
     }
@@ -3120,7 +3120,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
     }
     for _, test := range tests {
-        fakeNodeHandler.Update(context.TODO(), test.Node)
+        fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{})
         if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -3331,7 +3331,7 @@ func TestReconcileNodeLabels(t *testing.T) {
     }
     for _, test := range tests {
-        fakeNodeHandler.Update(context.TODO(), test.Node)
+        fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{})
         if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
             t.Fatalf("unexpected error: %v", err)
         }
@@ -3355,7 +3355,6 @@
             if actualValue != expectedValue {
                 t.Errorf("%s: label %q: expected value %q, got value %q", test.Name, key, expectedValue, actualValue)
             }
-
         }
     }
 }
diff --git a/pkg/controller/replication/conversion.go b/pkg/controller/replication/conversion.go
index b33fbb8e03c..0214a479678 100644
--- a/pkg/controller/replication/conversion.go
+++ b/pkg/controller/replication/conversion.go
@@ -203,16 +203,22 @@ type conversionClient struct {
     v1client.ReplicationControllerInterface
 }
-func (c conversionClient) Create(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-    return convertCall(ctx, c.ReplicationControllerInterface.Create, rs)
+func (c conversionClient) Create(ctx context.Context, rs *apps.ReplicaSet, opts metav1.CreateOptions) (*apps.ReplicaSet, error) {
+    return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+        return c.ReplicationControllerInterface.Create(ctx, rc, opts)
+    }, rs)
 }
-func (c conversionClient) Update(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-    return convertCall(ctx, c.ReplicationControllerInterface.Update, rs)
+func (c conversionClient) Update(ctx context.Context, rs *apps.ReplicaSet, opts metav1.UpdateOptions) (*apps.ReplicaSet, error) {
+    return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+        return c.ReplicationControllerInterface.Update(ctx, rc, opts)
+    }, rs)
 }
-func (c conversionClient) UpdateStatus(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-    return convertCall(ctx, c.ReplicationControllerInterface.UpdateStatus, rs)
+func (c conversionClient) UpdateStatus(ctx context.Context, rs *apps.ReplicaSet, opts metav1.UpdateOptions) (*apps.ReplicaSet, error) {
+    return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+        return c.ReplicationControllerInterface.UpdateStatus(ctx, rc, opts)
+    }, rs)
 }
 func (c conversionClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*apps.ReplicaSet, error) {
@@ -236,7 +242,7 @@ func (c conversionClient) Watch(ctx context.Context, opts metav1.ListOptions) (w
     return nil, errors.New("Watch() is not implemented for conversionClient")
 }
-func (c conversionClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ReplicaSet, err error) {
+func (c conversionClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apps.ReplicaSet, err error) {
     // This is not used by RSC.
     return nil, errors.New("Patch() is not implemented for conversionClient")
 }
@@ -246,7 +252,7 @@ func (c conversionClient) GetScale(ctx context.Context, name string, options met
     return nil, errors.New("GetScale() is not implemented for conversionClient")
 }
-func (c conversionClient) UpdateScale(ctx context.Context, name string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c conversionClient) UpdateScale(ctx context.Context, name string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
     // This is not used by RSC.
     return nil, errors.New("UpdateScale() is not implemented for conversionClient")
 }
@@ -275,12 +281,12 @@ func convertList(rcList *v1.ReplicationControllerList) (*apps.ReplicaSetList, er
     return rsList, nil
 }
-func convertCall(ctx context.Context, fn func(context.Context, *v1.ReplicationController) (*v1.ReplicationController, error), rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
+func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController, error), rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
     rc, err := convertRStoRC(rs)
     if err != nil {
         return nil, err
     }
-    result, err := fn(ctx, rc)
+    result, err := fn(rc)
     if err != nil {
         return nil, err
     }
diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go
index 7eb12d8b83a..3f7b8ab0ea8 100644
--- a/pkg/controller/testutil/test_utils.go
+++ b/pkg/controller/testutil/test_utils.go
@@ -111,7 +111,7 @@ func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
 }
 // Create adds a new Node to the fake store.
-func (m *FakeNodeHandler) Create(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) Create(_ context.Context, node *v1.Node, _ metav1.CreateOptions) (*v1.Node, error) {
     m.lock.Lock()
     defer func() {
         m.RequestCount++
@@ -202,7 +202,7 @@ func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.Delete
 }
 // Update updates a Node in the fake store.
-func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node, _ metav1.UpdateOptions) (*v1.Node, error) {
     m.lock.Lock()
     defer func() {
         m.RequestCount++
@@ -221,7 +221,7 @@ func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node) (*v1.Node, er
 }
 // UpdateStatus updates a status of a Node in the fake store.
-func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.Node, _ metav1.UpdateOptions) (*v1.Node, error) {
     m.lock.Lock()
     defer func() {
         m.RequestCount++
@@ -266,7 +266,7 @@ func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.No
 // PatchStatus patches a status of a Node in the fake store.
 func (m *FakeNodeHandler) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) {
     m.RequestCount++
-    return m.Patch(ctx, nodeName, types.StrategicMergePatchType, data, "status")
+    return m.Patch(ctx, nodeName, types.StrategicMergePatchType, data, metav1.PatchOptions{}, "status")
 }
 // Watch watches Nodes in a fake store.
@@ -275,7 +275,7 @@ func (m *FakeNodeHandler) Watch(_ context.Context, opts metav1.ListOptions) (wat
 }
 // Patch patches a Node in the fake store.
-func (m *FakeNodeHandler) Patch(_ context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
+func (m *FakeNodeHandler) Patch(_ context.Context, name string, pt types.PatchType, data []byte, _ metav1.PatchOptions, subresources ...string) (*v1.Node, error) {
     m.lock.Lock()
     defer func() {
         m.RequestCount++
diff --git a/pkg/kubelet/certificate/bootstrap/bootstrap_test.go b/pkg/kubelet/certificate/bootstrap/bootstrap_test.go
index 40bbfd16b6c..bbfa0e9aa8c 100644
--- a/pkg/kubelet/certificate/bootstrap/bootstrap_test.go
+++ b/pkg/kubelet/certificate/bootstrap/bootstrap_test.go
@@ -148,7 +148,7 @@ type fakeClient struct {
     failureType failureType
 }
-func (c *fakeClient) Create(context.Context, *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
+func (c *fakeClient) Create(context.Context, *certificates.CertificateSigningRequest, metav1.CreateOptions) (*certificates.CertificateSigningRequest, error) {
     if c.failureType == createError {
         return nil, fmt.Errorf("fakeClient failed creating request")
     }
diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go b/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go
index b68b7b601a8..6fe95130ae6 100644
--- a/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go
+++ b/staging/src/k8s.io/client-go/util/certificate/certificate_manager_test.go
@@ -1012,7 +1012,7 @@ func (c fakeClient) List(_ context.Context, opts v1.ListOptions) (*certificates.
     return &csrReply, nil
 }
-func (c fakeClient) Create(context.Context, *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
+func (c fakeClient) Create(context.Context, *certificates.CertificateSigningRequest, v1.CreateOptions) (*certificates.CertificateSigningRequest, error) {
     if c.failureType == createError {
         if c.err != nil {
             return nil, c.err
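A caller-side sketch of what these signature changes mean in practice (illustrative only, not part of the patch): every mutating client call now takes an explicit options struct alongside the context, and the typed clients forward it through VersionedParams(&opts, scheme.ParameterCodec) so the options are encoded as request query parameters. The snippet below assumes a wardle clientset has already been constructed elsewhere; the helper name, namespace, field-manager value, and label are made up for illustration.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        wardlev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
        clientset "k8s.io/sample-apiserver/pkg/generated/clientset/versioned"
    )

    // exerciseFlunders is a hypothetical helper showing the post-change call sites.
    func exerciseFlunders(ctx context.Context, cs clientset.Interface) error {
        flunders := cs.WardleV1alpha1().Flunders("default")

        // Create now takes metav1.CreateOptions, which can carry a field manager.
        f, err := flunders.Create(ctx, &wardlev1alpha1.Flunder{
            ObjectMeta: metav1.ObjectMeta{Name: "example"},
        }, metav1.CreateOptions{FieldManager: "example-controller"})
        if err != nil {
            return err
        }

        // Update (and UpdateStatus) now take metav1.UpdateOptions.
        if _, err := flunders.Update(ctx, f, metav1.UpdateOptions{}); err != nil {
            return err
        }

        // Patch now takes metav1.PatchOptions before the variadic subresources.
        _, err = flunders.Patch(ctx, f.Name, types.MergePatchType,
            []byte(`{"metadata":{"labels":{"demo":"true"}}}`), metav1.PatchOptions{})
        return err
    }

The fake clients changed above accept the same option arguments, so test code compiles against the identical interface even though the fakes in this series do not yet inspect the options they are given.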