From 469df1203884b49fdfe1010fdd544cc42f3ec216 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 21 Jan 2017 22:36:02 -0500 Subject: [PATCH] refactor: move ListOptions references to metav1 --- .../elasticsearch_logging_discovery.go | 2 +- .../apiregistration/internalversion/BUILD | 3 +- .../informers/apiregistration/v1alpha1/BUILD | 2 +- cmd/kubeadm/app/cmd/token.go | 8 +- cmd/kubeadm/app/master/addons.go | 10 +- cmd/kubeadm/app/master/apiclient.go | 13 +- cmd/kubeadm/app/master/discovery.go | 7 +- cmd/kubeadm/app/master/selfhosted.go | 13 +- .../app/phases/apiconfig/clusterroles.go | 3 +- cmd/kubeadm/app/util/tokens.go | 6 +- .../client-gen/test_apis/testgroup/BUILD | 1 - examples/examples_test.go | 30 ++-- federation/apis/core/BUILD | 3 - .../cluster/cluster_client.go | 2 +- .../cluster/clustercontroller.go | 8 +- .../pkg/federation-controller/configmap/BUILD | 1 + .../configmap/configmap_controller.go | 17 +- .../pkg/federation-controller/daemonset/BUILD | 1 + .../daemonset/daemonset_controller.go | 17 +- .../federation-controller/deployment/BUILD | 1 + .../deployment/deploymentcontroller.go | 25 +-- .../deployment/deploymentcontroller_test.go | 2 +- .../ingress/ingress_controller.go | 20 +-- .../pkg/federation-controller/namespace/BUILD | 1 + .../namespace/namespace_controller.go | 23 +-- .../federation-controller/replicaset/BUILD | 1 + .../replicaset/replicasetcontroller.go | 25 +-- .../replicaset/replicasetcontroller_test.go | 26 +-- .../pkg/federation-controller/secret/BUILD | 1 + .../secret/secret_controller.go | 17 +- .../service/cluster_helper.go | 17 +- .../service/servicecontroller.go | 20 +-- .../pkg/federation-controller/util/BUILD | 1 - .../util/deployment_test.go | 3 +- .../util/federated_informer.go | 5 +- .../util/federated_informer_test.go | 8 +- .../util/versionize_listoptions.go | 40 ----- federation/pkg/kubefed/init/init.go | 2 +- federation/registry/cluster/BUILD | 1 + pkg/api/v1/BUILD | 1 - pkg/api/validation/events.go | 3 +- 
pkg/api/validation/events_test.go | 18 +- pkg/api/validation/validation_test.go | 60 +++---- pkg/apis/apps/validation/validation_test.go | 48 ++--- .../autoscaling/validation/validation_test.go | 37 ++-- pkg/apis/batch/validation/validation_test.go | 50 +++--- pkg/apis/componentconfig/v1alpha1/defaults.go | 2 +- .../extensions/validation/validation_test.go | 166 +++++++++--------- pkg/apis/rbac/validation/validation_test.go | 23 ++- pkg/client/cache/BUILD | 1 + pkg/client/cache/controller_test.go | 4 +- pkg/client/cache/listers.go | 3 +- pkg/client/cache/listwatch.go | 2 - pkg/client/cache/listwatch_test.go | 28 +-- pkg/client/cache/mutation_detector_test.go | 4 +- pkg/client/cache/reflector_test.go | 28 +-- .../typed/core/v1/event_expansion.go | 3 +- .../core/internalversion/event_expansion.go | 3 +- .../fake/fake_event_expansion.go | 5 +- pkg/client/legacylisters/listers_test.go | 6 +- pkg/client/record/event.go | 2 +- .../testing/cache/fake_controller_source.go | 8 +- .../cache/fake_controller_source_test.go | 8 +- pkg/client/testing/core/actions.go | 15 +- pkg/client/testing/core/fake_test.go | 10 +- pkg/client/typed/dynamic/client_test.go | 7 +- .../certificates/certificate_controller.go | 6 +- pkg/controller/cloud/nodecontroller.go | 2 +- pkg/controller/controller_utils_test.go | 6 +- .../cronjob/cronjob_controller_test.go | 2 +- pkg/controller/cronjob/utils_test.go | 4 +- .../daemon/daemoncontroller_test.go | 10 +- .../deployment/deployment_controller_test.go | 6 +- pkg/controller/deployment/sync.go | 6 +- .../deployment/util/deployment_util.go | 12 +- pkg/controller/disruption/disruption.go | 40 ++--- pkg/controller/disruption/disruption_test.go | 12 +- .../endpoint/endpoints_controller.go | 10 +- .../endpoint/endpoints_controller_test.go | 18 +- .../garbagecollector/garbagecollector.go | 8 +- .../garbagecollector/garbagecollector_test.go | 4 +- pkg/controller/informers/BUILD | 1 + pkg/controller/informers/batch.go | 10 +- pkg/controller/informers/core.go | 
77 ++++---- pkg/controller/informers/extensions.go | 26 +-- pkg/controller/informers/rbac.go | 30 ++-- pkg/controller/informers/storage.go | 6 +- pkg/controller/job/jobcontroller_test.go | 4 +- .../namespace/namespace_controller.go | 4 +- .../namespace/namespace_controller_utils.go | 6 +- pkg/controller/node/controller_utils.go | 9 +- pkg/controller/node/nodecontroller.go | 2 +- pkg/controller/node/nodecontroller_test.go | 2 +- pkg/controller/node/testutil/test_utils.go | 10 +- pkg/controller/podautoscaler/horizontal.go | 8 +- pkg/controller/podautoscaler/metrics/BUILD | 1 + .../podautoscaler/replica_calculator.go | 5 +- pkg/controller/podgc/BUILD | 1 + pkg/controller/podgc/gc_controller.go | 3 +- pkg/controller/replicaset/replica_set_test.go | 4 +- .../replication_controller_test.go | 4 +- .../resourcequota/replenishment_controller.go | 41 ++--- .../resource_quota_controller.go | 8 +- pkg/controller/route/routecontroller.go | 4 +- pkg/controller/service/BUILD | 1 + pkg/controller/service/servicecontroller.go | 11 +- .../serviceaccounts_controller_test.go | 2 +- .../serviceaccount/tokens_controller.go | 16 +- .../serviceaccount/tokens_controller_test.go | 114 ++++++------ pkg/controller/statefulset/fakes.go | 2 +- pkg/controller/statefulset/stateful_set.go | 8 +- .../persistentvolume/pv_controller_base.go | 20 +-- pkg/genericapiserver/endpoints/BUILD | 1 + .../endpoints/apiserver_test.go | 8 +- .../endpoints/groupversion.go | 2 +- pkg/genericapiserver/endpoints/handlers/BUILD | 1 + pkg/genericapiserver/endpoints/watch_test.go | 8 +- .../registry/generic/registry/BUILD | 2 + .../registry/generic/registry/store_test.go | 2 +- pkg/genericapiserver/registry/rest/BUILD | 1 + pkg/genericapiserver/registry/rest/create.go | 3 +- .../registry/rest/resttest/BUILD | 1 + .../registry/rest/resttest/resttest.go | 10 +- pkg/genericapiserver/registry/rest/update.go | 2 +- .../server/genericapiserver.go | 2 +- .../server/genericapiserver_test.go | 2 +- 
.../server/options/server_run_options.go | 3 +- pkg/kubectl/cmd/apply.go | 3 +- pkg/kubectl/cmd/clusterinfo.go | 3 +- pkg/kubectl/cmd/clusterinfo_dump.go | 21 +-- pkg/kubectl/cmd/drain.go | 4 +- pkg/kubectl/cmd/run.go | 2 +- pkg/kubectl/cmd/top_node.go | 4 +- pkg/kubectl/cmd/top_pod.go | 4 +- pkg/kubectl/cmd/util/factory.go | 3 +- pkg/kubectl/cmd/util/factory_test.go | 14 +- pkg/kubectl/describe.go | 16 +- pkg/kubectl/metricsutil/metrics_client.go | 6 +- pkg/kubectl/resource/builder.go | 6 +- pkg/kubectl/resource/visitor.go | 4 +- pkg/kubectl/rollback.go | 5 +- pkg/kubectl/rolling_updater.go | 8 +- pkg/kubectl/rolling_updater_test.go | 32 ++-- pkg/kubectl/scale.go | 4 +- pkg/kubectl/scale_test.go | 12 +- pkg/kubectl/stop.go | 8 +- pkg/kubectl/stop_test.go | 14 +- pkg/kubelet/config/apiserver.go | 3 +- pkg/kubelet/config/apiserver_test.go | 4 +- pkg/kubelet/config/common.go | 5 +- pkg/kubelet/config/http_test.go | 4 +- pkg/kubelet/kubelet.go | 5 +- pkg/kubelet/kubelet_pods_test.go | 8 +- pkg/kubelet/kubelet_test.go | 2 +- pkg/kubelet/network/hostport/hostport_test.go | 4 +- pkg/kubelet/pod/pod_manager_test.go | 6 +- pkg/kubelet/server/server_test.go | 3 +- pkg/kubelet/server/stats/handler.go | 3 +- pkg/kubelet/types/pod_update.go | 3 +- pkg/kubelet/util/csr/csr.go | 1 - pkg/kubemark/hollow_kubelet.go | 3 +- pkg/master/controller.go | 22 +-- pkg/master/controller_test.go | 4 +- pkg/master/master.go | 3 +- pkg/master/master_test.go | 6 +- pkg/metrics/BUILD | 2 +- pkg/metrics/metrics_grabber.go | 12 +- pkg/proxy/config/BUILD | 1 - pkg/proxy/config/api.go | 5 +- pkg/proxy/config/api_test.go | 5 +- pkg/quota/evaluator/core/BUILD | 1 + pkg/quota/evaluator/core/configmap.go | 4 +- .../core/persistent_volume_claims.go | 3 +- pkg/quota/evaluator/core/pods.go | 3 +- .../evaluator/core/replication_controllers.go | 4 +- pkg/quota/evaluator/core/resource_quotas.go | 4 +- pkg/quota/evaluator/core/secrets.go | 4 +- pkg/quota/evaluator/core/services.go | 3 +- 
pkg/quota/generic/BUILD | 2 +- pkg/quota/generic/evaluator.go | 8 +- .../apps/petset/storage/storage_test.go | 6 +- pkg/registry/apps/petset/strategy_test.go | 6 +- .../storage/storage_test.go | 3 +- .../batch/cronjob/storage/storage_test.go | 2 +- pkg/registry/batch/cronjob/strategy_test.go | 6 +- pkg/registry/batch/job/strategy_test.go | 8 +- pkg/registry/certificates/certificates/BUILD | 1 + pkg/registry/core/componentstatus/BUILD | 1 + pkg/registry/core/configmap/BUILD | 1 + pkg/registry/core/configmap/strategy_test.go | 4 +- pkg/registry/core/controller/BUILD | 1 + .../core/controller/storage/storage_test.go | 2 +- pkg/registry/core/controller/strategy_test.go | 6 +- pkg/registry/core/endpoint/BUILD | 1 + .../core/endpoint/storage/storage_test.go | 2 +- .../core/limitrange/storage/storage_test.go | 2 +- pkg/registry/core/namespace/BUILD | 1 + pkg/registry/core/node/BUILD | 1 + .../storage/storage_test.go | 16 +- pkg/registry/core/pod/storage/storage_test.go | 26 +-- .../core/podtemplate/storage/storage_test.go | 2 +- .../resourcequota/storage/storage_test.go | 4 +- pkg/registry/core/secret/BUILD | 1 + .../core/secret/storage/storage_test.go | 2 +- pkg/registry/core/service/BUILD | 1 + .../core/service/ipallocator/controller/BUILD | 1 + .../service/portallocator/controller/BUILD | 1 + .../portallocator/controller/repair.go | 3 +- pkg/registry/core/service/rest_test.go | 10 +- .../core/service/storage/storage_test.go | 2 +- pkg/registry/core/serviceaccount/BUILD | 1 + .../serviceaccount/storage/storage_test.go | 2 +- .../daemonset/storage/storage_test.go | 2 +- pkg/registry/extensions/deployment/BUILD | 1 + .../ingress/storage/storage_test.go | 2 +- .../extensions/ingress/strategy_test.go | 2 +- .../networkpolicy/storage/storage_test.go | 3 +- .../extensions/networkpolicy/strategy_test.go | 3 +- pkg/registry/extensions/replicaset/BUILD | 1 + .../replicaset/storage/storage_test.go | 18 +- .../extensions/replicaset/strategy_test.go | 6 +- 
pkg/registry/extensions/rest/BUILD | 2 +- .../extensions/rest/thirdparty_controller.go | 4 +- .../extensions/thirdpartyresourcedata/BUILD | 2 +- .../storage/storage_test.go | 3 +- .../storage/storage_test.go | 6 +- .../poddisruptionbudget/strategy_test.go | 7 +- pkg/registry/rbac/clusterrole/BUILD | 1 + pkg/registry/rbac/clusterrolebinding/BUILD | 1 + pkg/registry/rbac/rest/BUILD | 2 +- pkg/registry/rbac/role/BUILD | 1 + pkg/registry/rbac/rolebinding/BUILD | 1 + pkg/registry/registrytest/BUILD | 1 + pkg/registry/registrytest/service.go | 2 +- pkg/storage/BUILD | 1 - pkg/storage/cacher.go | 5 +- pkg/storage/watch_cache_test.go | 13 +- pkg/volume/plugins.go | 2 +- pkg/volume/util_test.go | 4 +- plugin/pkg/admission/gc/gc_admission_test.go | 2 +- .../admission/initialresources/hawkular.go | 3 +- plugin/pkg/admission/limitranger/BUILD | 1 + plugin/pkg/admission/limitranger/admission.go | 3 +- .../namespace/lifecycle/admission.go | 2 +- .../namespace/lifecycle/admission_test.go | 4 +- plugin/pkg/admission/resourcequota/BUILD | 1 - .../resourcequota/resource_access.go | 18 +- .../security/podsecuritypolicy/BUILD | 2 +- .../security/podsecuritypolicy/admission.go | 14 +- plugin/pkg/admission/serviceaccount/BUILD | 1 - .../pkg/admission/serviceaccount/admission.go | 29 ++- .../pkg/admission/storageclass/default/BUILD | 2 +- .../storageclass/default/admission.go | 14 +- .../priorities/selector_spreading_test.go | 16 +- plugin/pkg/scheduler/factory/factory.go | 16 +- plugin/pkg/scheduler/factory/factory_test.go | 4 +- .../pkg/endpoints/request/context.go | 2 +- .../pkg/endpoints/request/context_test.go | 6 +- .../pkg/endpoints/request/requestinfo.go | 2 +- .../pkg/endpoints/request/requestinfo_test.go | 2 +- .../core/v1/fake/fake_event_expansion.go | 5 +- .../client-go/pkg/apis/kubeadm/register.go | 2 +- .../federation/apis/federation/register.go | 1 - .../src/k8s.io/client-go/rest/request_test.go | 4 +- .../src/k8s.io/client-go/testing/actions.go | 3 +- test/e2e/BUILD | 2 
+- test/e2e/addon_update.go | 3 +- test/e2e/cadvisor.go | 4 +- test/e2e/cluster_logging_es.go | 13 +- test/e2e/cluster_logging_utils.go | 7 +- test/e2e/cluster_size_autoscaling.go | 6 +- test/e2e/common/init_container.go | 8 +- test/e2e/common/pods.go | 14 +- test/e2e/cronjob.go | 14 +- test/e2e/daemon_restart.go | 7 +- test/e2e/daemon_set.go | 10 +- test/e2e/dashboard.go | 4 +- test/e2e/density.go | 12 +- test/e2e/deployment.go | 14 +- test/e2e/dns.go | 4 +- test/e2e/dns_autoscaling.go | 21 ++- test/e2e/dns_configmap.go | 4 +- test/e2e/e2e.go | 15 +- test/e2e/etcd_failure.go | 5 +- test/e2e/events.go | 6 +- test/e2e/example_cluster_dns.go | 5 +- test/e2e/examples.go | 2 +- test/e2e/framework/framework.go | 10 +- test/e2e/framework/kubelet_stats.go | 6 +- test/e2e/framework/metrics_util.go | 7 +- test/e2e/framework/networking_utils.go | 2 +- test/e2e/framework/nodes_util.go | 3 +- test/e2e/framework/resource_usage_gatherer.go | 6 +- test/e2e/framework/service_util.go | 4 +- test/e2e/framework/util.go | 116 ++++++------ test/e2e/garbage_collector.go | 30 ++-- test/e2e/generated_clientset.go | 16 +- test/e2e/ingress_utils.go | 8 +- test/e2e/job.go | 2 +- test/e2e/kibana_logging.go | 10 +- test/e2e/kubectl.go | 4 +- test/e2e/kubelet_perf.go | 4 +- test/e2e/load.go | 2 +- test/e2e/mesos.go | 4 +- test/e2e/metrics_grabber_test.go | 6 +- test/e2e/monitoring.go | 31 ++-- test/e2e/namespace.go | 2 +- test/e2e/network_partition.go | 20 +-- test/e2e/node_problem_detector.go | 14 +- test/e2e/nodeoutofdisk.go | 6 +- test/e2e/opaque_resource.go | 10 +- test/e2e/persistent_volumes-disruptive.go | 2 +- test/e2e/persistent_volumes.go | 2 +- test/e2e/pod_gc.go | 2 +- test/e2e/pods.go | 8 +- test/e2e/reboot.go | 6 +- test/e2e/rescheduler.go | 10 +- test/e2e/resize_nodes.go | 5 +- test/e2e/resource_quota.go | 2 +- test/e2e/restart.go | 5 +- test/e2e/scheduler_predicates.go | 9 +- test/e2e/service.go | 4 +- test/e2e/service_latency.go | 4 +- test/e2e/serviceloadbalancers.go | 3 +- 
test/e2e/statefulset.go | 18 +- test/e2e/ubernetes_lite.go | 2 +- test/e2e_federation/federated-daemonset.go | 2 +- test/e2e_federation/federated-deployment.go | 2 +- test/e2e_federation/federated-ingress.go | 2 +- test/e2e_federation/federated-namespace.go | 8 +- test/e2e_federation/federated-replicaset.go | 2 +- test/e2e_federation/federated-secret.go | 2 +- test/e2e_federation/federation-apiserver.go | 4 +- test/e2e_federation/federation-event.go | 2 +- test/e2e_federation/federation-util.go | 4 +- test/e2e_federation/framework/framework.go | 4 +- test/e2e_federation/framework/util.go | 3 +- test/e2e_node/apparmor_test.go | 2 +- test/e2e_node/density_test.go | 4 +- test/e2e_node/e2e_node_suite_test.go | 3 +- test/e2e_node/inode_eviction_test.go | 4 +- test/e2e_node/memory_eviction_test.go | 4 +- test/e2e_node/restart_test.go | 3 +- test/images/clusterapi-tester/main.go | 8 +- test/integration/auth/auth_test.go | 9 +- test/integration/auth/rbac_test.go | 4 +- test/integration/client/client_test.go | 8 +- .../integration/client/dynamic_client_test.go | 4 +- test/integration/framework/master_utils.go | 2 +- .../garbage_collector_test.go | 16 +- .../master/master_benchmark_test.go | 5 +- test/integration/master/master_test.go | 30 ++-- test/integration/metrics/metrics_test.go | 3 +- test/integration/quota/quota_test.go | 6 +- .../integration/replicaset/replicaset_test.go | 4 +- .../replicationcontroller_test.go | 4 +- test/integration/scheduler/extender_test.go | 2 +- test/integration/scheduler/scheduler_test.go | 6 +- .../serviceaccount/service_account_test.go | 6 +- .../volume/persistent_volumes_test.go | 32 ++-- test/soak/cauldron/cauldron.go | 2 +- test/soak/serve_hostnames/serve_hostnames.go | 2 +- test/utils/pod_store.go | 5 +- test/utils/runners.go | 6 +- 364 files changed, 1519 insertions(+), 1554 deletions(-) delete mode 100644 federation/pkg/federation-controller/util/versionize_listoptions.go diff --git 
a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go index b52be0dff1f..265000be66a 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go +++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go @@ -53,7 +53,7 @@ func main() { if err != nil { glog.Fatalf("Failed to make client: %v", err) } - namespace := api.NamespaceSystem + namespace := metav1.NamespaceSystem envNamespace := os.Getenv("NAMESPACE") if envNamespace != "" { if _, err := client.Core().Namespaces().Get(envNamespace, meta_v1.GetOptions{}); err != nil { diff --git a/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion/BUILD b/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion/BUILD index 884bf8fc48b..726f0c8b6d8 100644 --- a/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion/BUILD +++ b/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion/BUILD @@ -19,9 +19,8 @@ go_library( "//cmd/kube-aggregator/pkg/client/clientset_generated/internalclientset:go_default_library", "//cmd/kube-aggregator/pkg/client/informers/internalinterfaces:go_default_library", "//cmd/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/cache:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/watch", ], diff --git a/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1/BUILD b/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1/BUILD index 62c012ce6ba..7cbf4dbb29b 100644 --- a/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1/BUILD +++ b/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1/BUILD @@ 
-19,8 +19,8 @@ go_library( "//cmd/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library", "//cmd/kube-aggregator/pkg/client/informers/internalinterfaces:go_default_library", "//cmd/kube-aggregator/pkg/client/listers/apiregistration/v1alpha1:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/cache:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/watch", ], diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index d3a43e7ccd1..c6c9957863d 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -27,13 +27,13 @@ import ( "github.com/renstrom/dedent" "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master" "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubectl" ) @@ -167,11 +167,11 @@ func RunListTokens(out io.Writer, errW io.Writer, cmd *cobra.Command) error { api.SecretTypeField: string(api.SecretTypeBootstrapToken), }, ) - listOptions := v1.ListOptions{ + listOptions := metav1.ListOptions{ FieldSelector: tokenSelector.String(), } - results, err := client.Secrets(api.NamespaceSystem).List(listOptions) + results, err := client.Secrets(metav1.NamespaceSystem).List(listOptions) if err != nil { return fmt.Errorf("failed to list bootstrap tokens [%v]", err) } @@ -222,7 +222,7 @@ func RunDeleteToken(out io.Writer, cmd *cobra.Command, tokenId string) error { } tokenSecretName := fmt.Sprintf("%s%s", kubeadmutil.BootstrapTokenSecretPrefix, tokenId) - if err := client.Secrets(api.NamespaceSystem).Delete(tokenSecretName, nil); err != nil { + if err := 
client.Secrets(metav1.NamespaceSystem).Delete(tokenSecretName, nil); err != nil { return fmt.Errorf("failed to delete bootstrap token [%v]", err) } fmt.Fprintf(out, "[token] bootstrap token deleted: %s\n", tokenId) diff --git a/cmd/kubeadm/app/master/addons.go b/cmd/kubeadm/app/master/addons.go index 2661bb4c632..9d682fe0792 100644 --- a/cmd/kubeadm/app/master/addons.go +++ b/cmd/kubeadm/app/master/addons.go @@ -21,10 +21,10 @@ import ( "net" "path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/images" "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -268,7 +268,7 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse SetMasterTaintTolerations(&kubeProxyDaemonSet.Spec.Template.ObjectMeta) SetNodeAffinity(&kubeProxyDaemonSet.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity()) - if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil { + if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil { return fmt.Errorf("failed creating essential kube-proxy addon [%v]", err) } @@ -279,10 +279,10 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse SetNodeAffinity(&kubeDNSDeployment.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity()) kubeDNSServiceAccount := &v1.ServiceAccount{} kubeDNSServiceAccount.ObjectMeta.Name = KubeDNS - if _, err := client.ServiceAccounts(api.NamespaceSystem).Create(kubeDNSServiceAccount); err != nil { + if _, err := client.ServiceAccounts(metav1.NamespaceSystem).Create(kubeDNSServiceAccount); err != nil { return fmt.Errorf("failed creating kube-dns service account [%v]", err) } - if _, err := 
client.Extensions().Deployments(api.NamespaceSystem).Create(kubeDNSDeployment); err != nil { + if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kubeDNSDeployment); err != nil { return fmt.Errorf("failed creating essential kube-dns addon [%v]", err) } @@ -293,7 +293,7 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse kubeDNSService := NewService(KubeDNS, *kubeDNSServiceSpec) kubeDNSService.ObjectMeta.Labels["kubernetes.io/name"] = "KubeDNS" - if _, err := client.Services(api.NamespaceSystem).Create(kubeDNSService); err != nil { + if _, err := client.Services(metav1.NamespaceSystem).Create(kubeDNSService); err != nil { return fmt.Errorf("failed creating essential kube-dns addon [%v]", err) } diff --git a/cmd/kubeadm/app/master/apiclient.go b/cmd/kubeadm/app/master/apiclient.go index 8bbe075674b..eb516f86f87 100644 --- a/cmd/kubeadm/app/master/apiclient.go +++ b/cmd/kubeadm/app/master/apiclient.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/kubernetes/cmd/kubeadm/app/images" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -73,7 +72,7 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) { fmt.Println("[apiclient] Waiting for at least one node to register and become ready") start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { - nodeList, err := client.Nodes().List(v1.ListOptions{}) + nodeList, err := client.Nodes().List(metav1.ListOptions{}) if err != nil { fmt.Println("[apiclient] Temporarily unable to list nodes (will retry)") return false, nil @@ -107,7 +106,7 @@ func WaitForAPI(client *clientset.Clientset) { start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: use /healthz API instead of this - cs, err := 
client.ComponentStatuses().List(v1.ListOptions{}) + cs, err := client.ComponentStatuses().List(metav1.ListOptions{}) if err != nil { if apierrs.IsForbidden(err) { fmt.Println("[apiclient] Waiting for API server authorization") @@ -176,7 +175,7 @@ func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *e // It's safe to do this for alpha, as we don't have HA and there is no way we can get // more then one node here (TODO(phase1+) use os.Hostname) func findMyself(client *clientset.Clientset) (*v1.Node, error) { - nodeList, err := client.Nodes().List(v1.ListOptions{}) + nodeList, err := client.Nodes().List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("unable to list nodes [%v]", err) } @@ -274,7 +273,7 @@ func createDummyDeployment(client *clientset.Clientset) { wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: we should check the error, as some cases may be fatal - if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil { + if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(dummyDeployment); err != nil { fmt.Printf("[apiclient] Failed to create test deployment [%v] (will retry)\n", err) return false, nil } @@ -282,7 +281,7 @@ func createDummyDeployment(client *clientset.Clientset) { }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { - d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{}) + d, err := client.Extensions().Deployments(metav1.NamespaceSystem).Get("dummy", metav1.GetOptions{}) if err != nil { fmt.Printf("[apiclient] Failed to get test deployment [%v] (will retry)\n", err) return false, nil @@ -296,7 +295,7 @@ func createDummyDeployment(client *clientset.Clientset) { fmt.Println("[apiclient] Test deployment succeeded") // TODO: In the future, make sure the ReplicaSet and Pod are garbage collected - if err := 
client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil { + if err := client.Extensions().Deployments(metav1.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil { fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err) } } diff --git a/cmd/kubeadm/app/master/discovery.go b/cmd/kubeadm/app/master/discovery.go index 1189ec9061b..58ee74d746f 100644 --- a/cmd/kubeadm/app/master/discovery.go +++ b/cmd/kubeadm/app/master/discovery.go @@ -30,7 +30,6 @@ import ( kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -136,10 +135,10 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli kd := newKubeDiscovery(cfg, caCert) - if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kd.Deployment); err != nil { + if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kd.Deployment); err != nil { return fmt.Errorf("failed to create %q deployment [%v]", kubeDiscoveryName, err) } - if _, err := client.Secrets(api.NamespaceSystem).Create(kd.Secret); err != nil { + if _, err := client.Secrets(metav1.NamespaceSystem).Create(kd.Secret); err != nil { return fmt.Errorf("failed to create %q secret [%v]", kubeDiscoverySecretName, err) } @@ -147,7 +146,7 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { - d, err := client.Extensions().Deployments(api.NamespaceSystem).Get(kubeDiscoveryName, metav1.GetOptions{}) + d, err := client.Extensions().Deployments(metav1.NamespaceSystem).Get(kubeDiscoveryName, 
metav1.GetOptions{}) if err != nil { return false, nil } diff --git a/cmd/kubeadm/app/master/selfhosted.go b/cmd/kubeadm/app/master/selfhosted.go index b0c54e48c36..031a1a74dd4 100644 --- a/cmd/kubeadm/app/master/selfhosted.go +++ b/cmd/kubeadm/app/master/selfhosted.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/images" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" ext "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -69,7 +68,7 @@ func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clie start := time.Now() apiServer := getAPIServerDS(cfg, volumes, volumeMounts) - if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(&apiServer); err != nil { + if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(&apiServer); err != nil { return fmt.Errorf("failed to create self-hosted %q daemon set [%v]", kubeAPIServer, err) } @@ -77,7 +76,7 @@ func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clie // TODO: This might be pointless, checking the pods is probably enough. // It does however get us a count of how many there should be which may be useful // with HA. 
- apiDS, err := client.DaemonSets(api.NamespaceSystem).Get("self-hosted-"+kubeAPIServer, + apiDS, err := client.DaemonSets(metav1.NamespaceSystem).Get("self-hosted-"+kubeAPIServer, metav1.GetOptions{}) if err != nil { fmt.Println("[self-hosted] error getting apiserver DaemonSet:", err) @@ -114,7 +113,7 @@ func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, clie start := time.Now() ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts) - if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&ctrlMgr); err != nil { + if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&ctrlMgr); err != nil { return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err) } @@ -133,7 +132,7 @@ func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, clie func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error { start := time.Now() scheduler := getSchedulerDeployment(cfg) - if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(&scheduler); err != nil { + if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&scheduler); err != nil { return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err) } @@ -153,8 +152,8 @@ func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clie func waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) { wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: Do we need a stronger label link than this? 
- listOpts := v1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)} - apiPods, err := client.Pods(api.NamespaceSystem).List(listOpts) + listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)} + apiPods, err := client.Pods(metav1.NamespaceSystem).List(listOpts) if err != nil { fmt.Printf("[self-hosted] error getting %s pods [%v]\n", appLabel, err) return false, nil diff --git a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go index 3ff75512e28..446eaab1c2e 100644 --- a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go +++ b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/cmd/kubeadm/app/master" - "k8s.io/kubernetes/pkg/api" rbac "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" ) @@ -66,7 +65,7 @@ func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error { subject := rbac.Subject{ Kind: "ServiceAccount", Name: master.KubeDNS, - Namespace: api.NamespaceSystem, + Namespace: metav1.NamespaceSystem, } clusterRoleBinding := rbac.ClusterRoleBinding{ diff --git a/cmd/kubeadm/app/util/tokens.go b/cmd/kubeadm/app/util/tokens.go index 3e48289d7f7..e173cd32577 100644 --- a/cmd/kubeadm/app/util/tokens.go +++ b/cmd/kubeadm/app/util/tokens.go @@ -136,11 +136,11 @@ func UpdateOrCreateToken(client *clientset.Clientset, d *kubeadmapi.TokenDiscove secretName := fmt.Sprintf("%s%s", BootstrapTokenSecretPrefix, d.ID) var lastErr error for i := 0; i < tokenCreateRetries; i++ { - secret, err := client.Secrets(api.NamespaceSystem).Get(secretName, metav1.GetOptions{}) + secret, err := client.Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) if err == nil { // Secret with this ID already exists, update it: secret.Data = encodeTokenSecretData(d, tokenDuration) - if _, err := client.Secrets(api.NamespaceSystem).Update(secret); 
err == nil { + if _, err := client.Secrets(metav1.NamespaceSystem).Update(secret); err == nil { return nil } else { lastErr = err @@ -157,7 +157,7 @@ func UpdateOrCreateToken(client *clientset.Clientset, d *kubeadmapi.TokenDiscove Type: api.SecretTypeBootstrapToken, Data: encodeTokenSecretData(d, tokenDuration), } - if _, err := client.Secrets(api.NamespaceSystem).Create(secret); err == nil { + if _, err := client.Secrets(metav1.NamespaceSystem).Create(secret); err == nil { return nil } else { lastErr = err diff --git a/cmd/libs/go2idl/client-gen/test_apis/testgroup/BUILD b/cmd/libs/go2idl/client-gen/test_apis/testgroup/BUILD index e5e53a18134..eef8ff9ec3a 100644 --- a/cmd/libs/go2idl/client-gen/test_apis/testgroup/BUILD +++ b/cmd/libs/go2idl/client-gen/test_apis/testgroup/BUILD @@ -16,7 +16,6 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", diff --git a/examples/examples_test.go b/examples/examples_test.go index 1d970ce6b14..79131a8fe32 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -26,6 +26,8 @@ import ( "testing" "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" @@ -48,7 +50,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { switch t := obj.(type) { case *api.ReplicationController: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidateReplicationController(t) case *api.ReplicationControllerList: @@ -57,7 +59,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { } case *api.Service: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = 
validation.ValidateService(t) case *api.ServiceList: @@ -66,7 +68,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { } case *api.Pod: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidatePod(t) case *api.PodList: @@ -77,44 +79,44 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { errors = validation.ValidatePersistentVolume(t) case *api.PersistentVolumeClaim: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidatePersistentVolumeClaim(t) case *api.PodTemplate: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidatePodTemplate(t) case *api.Endpoints: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidateEndpoints(t) case *api.Namespace: errors = validation.ValidateNamespace(t) case *api.Secret: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidateSecret(t) case *api.LimitRange: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidateLimitRange(t) case *api.ResourceQuota: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = validation.ValidateResourceQuota(t) case *extensions.Deployment: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = expvalidation.ValidateDeployment(t) case *batch.Job: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } // Job needs generateSelector called before validation, and job.Validate does this. 
// See: https://github.com/kubernetes/kubernetes/issues/20951#issuecomment-187787040 @@ -122,17 +124,17 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { errors = job.Strategy.Validate(nil, t) case *extensions.Ingress: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = expvalidation.ValidateIngress(t) case *extensions.DaemonSet: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = expvalidation.ValidateDaemonSet(t) case *apps.StatefulSet: if t.Namespace == "" { - t.Namespace = api.NamespaceDefault + t.Namespace = metav1.NamespaceDefault } errors = appsvalidation.ValidateStatefulSet(t) default: diff --git a/federation/apis/core/BUILD b/federation/apis/core/BUILD index db15c776506..e16cca85f94 100644 --- a/federation/apis/core/BUILD +++ b/federation/apis/core/BUILD @@ -11,15 +11,12 @@ go_library( name = "go_default_library", srcs = [ "conversion.go", - "defaults.go", "register.go", ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", - "//vendor:k8s.io/apimachinery/pkg/fields", - "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", "//vendor:k8s.io/apimachinery/pkg/runtime/serializer", diff --git a/federation/pkg/federation-controller/cluster/cluster_client.go b/federation/pkg/federation-controller/cluster/cluster_client.go index 9418fdd8b36..d2cb53e28c5 100644 --- a/federation/pkg/federation-controller/cluster/cluster_client.go +++ b/federation/pkg/federation-controller/cluster/cluster_client.go @@ -142,7 +142,7 @@ func getRegionNameForNode(node api.Node) (string, error) { // Find the names of all zones and the region in which we have nodes in this cluster. 
func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) { zoneNames := sets.NewString() - nodes, err := client.Core().Nodes().List(api.ListOptions{}) + nodes, err := client.Core().Nodes().List(metav1.ListOptions{}) if err != nil { glog.Errorf("Failed to list nodes while getting zone names: %v", err) return nil, "", err diff --git a/federation/pkg/federation-controller/cluster/clustercontroller.go b/federation/pkg/federation-controller/cluster/clustercontroller.go index 89c20833253..d41ad536277 100644 --- a/federation/pkg/federation-controller/cluster/clustercontroller.go +++ b/federation/pkg/federation-controller/cluster/clustercontroller.go @@ -21,6 +21,7 @@ import ( "time" "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -29,7 +30,6 @@ import ( federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" clustercache "k8s.io/kubernetes/federation/client/cache" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/controller" ) @@ -64,10 +64,10 @@ func NewclusterController(federationClient federationclientset.Interface, cluste } cc.clusterStore.Store, cc.clusterController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return cc.federationClient.Federation().Clusters().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return cc.federationClient.Federation().Clusters().Watch(options) }, }, @@ -134,7 +134,7 @@ func (cc *ClusterController) GetClusterStatus(cluster *federationv1beta1.Cluster // 
UpdateClusterStatus checks cluster status and get the metrics from cluster's restapi func (cc *ClusterController) UpdateClusterStatus() error { - clusters, err := cc.federationClient.Federation().Clusters().List(v1.ListOptions{}) + clusters, err := cc.federationClient.Federation().Clusters().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/federation/pkg/federation-controller/configmap/BUILD b/federation/pkg/federation-controller/configmap/BUILD index 935c1d66650..8251338cb4d 100644 --- a/federation/pkg/federation-controller/configmap/BUILD +++ b/federation/pkg/federation-controller/configmap/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/client/record:go_default_library", "//pkg/controller:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/federation/pkg/federation-controller/configmap/configmap_controller.go b/federation/pkg/federation-controller/configmap/configmap_controller.go index e0d0d04abc7..acf7139ed36 100644 --- a/federation/pkg/federation-controller/configmap/configmap_controller.go +++ b/federation/pkg/federation-controller/configmap/configmap_controller.go @@ -19,6 +19,7 @@ package configmap import ( "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -98,11 +99,11 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont // Start informer on federated API servers on configmaps that should be federated. 
configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return client.Core().ConfigMaps(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().ConfigMaps(metav1.NamespaceAll).Watch(options) }, }, &apiv1.ConfigMap{}, @@ -115,11 +116,11 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return targetClient.Core().ConfigMaps(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return targetClient.Core().ConfigMaps(metav1.NamespaceAll).Watch(options) }, }, &apiv1.ConfigMap{}, diff --git a/federation/pkg/federation-controller/daemonset/BUILD b/federation/pkg/federation-controller/daemonset/BUILD index 3fa647990c5..d057a657f8c 100644 --- a/federation/pkg/federation-controller/daemonset/BUILD +++ b/federation/pkg/federation-controller/daemonset/BUILD @@ -27,6 +27,7 @@ go_library( "//pkg/controller:go_default_library", "//vendor:github.com/golang/glog", 
"//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/federation/pkg/federation-controller/daemonset/daemonset_controller.go b/federation/pkg/federation-controller/daemonset/daemonset_controller.go index b5ada4daa76..9a815c0d307 100644 --- a/federation/pkg/federation-controller/daemonset/daemonset_controller.go +++ b/federation/pkg/federation-controller/daemonset/daemonset_controller.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -105,11 +106,11 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont // Start informer in federated API servers on daemonsets that should be federated. daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return client.Extensions().DaemonSets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return client.Extensions().DaemonSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return client.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Extensions().DaemonSets(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.DaemonSet{}, @@ -122,11 +123,11 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( 
&cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return targetClient.Extensions().DaemonSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return targetClient.Extensions().DaemonSets(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.DaemonSet{}, diff --git a/federation/pkg/federation-controller/deployment/BUILD b/federation/pkg/federation-controller/deployment/BUILD index ef6445c53ba..e0da49a68c2 100644 --- a/federation/pkg/federation-controller/deployment/BUILD +++ b/federation/pkg/federation-controller/deployment/BUILD @@ -31,6 +31,7 @@ go_library( "//pkg/util/workqueue:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/federation/pkg/federation-controller/deployment/deploymentcontroller.go b/federation/pkg/federation-controller/deployment/deploymentcontroller.go index de16b9b2f4e..415ac7fa192 100644 --- a/federation/pkg/federation-controller/deployment/deploymentcontroller.go +++ b/federation/pkg/federation-controller/deployment/deploymentcontroller.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -122,11 +123,11 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen 
deploymentFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return clientset.Extensions().Deployments(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return clientset.Extensions().Deployments(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return clientset.Extensions().Deployments(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Extensions().Deployments(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.Deployment{}, @@ -149,11 +150,11 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return clientset.Core().Pods(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return clientset.Core().Pods(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Core().Pods(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Pod{}, @@ -169,11 +170,11 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen fdc.deploymentStore, fdc.deploymentController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return fdc.fedClient.Extensions().Deployments(apiv1.NamespaceAll).List(options) + 
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return fdc.fedClient.Extensions().Deployments(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return fdc.fedClient.Extensions().Deployments(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return fdc.fedClient.Extensions().Deployments(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.Deployment{}, diff --git a/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go b/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go index 8f858bd223e..2fd6d92d8e5 100644 --- a/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go +++ b/federation/pkg/federation-controller/deployment/deploymentcontroller_test.go @@ -177,7 +177,7 @@ func newDeploymentWithReplicas(name string, replicas int32) *extensionsv1.Deploy return &extensionsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: apiv1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, SelfLink: "/api/v1/namespaces/default/deployments/name", }, Spec: extensionsv1.DeploymentSpec{ diff --git a/federation/pkg/federation-controller/ingress/ingress_controller.go b/federation/pkg/federation-controller/ingress/ingress_controller.go index a76caa01702..3bffd07098b 100644 --- a/federation/pkg/federation-controller/ingress/ingress_controller.go +++ b/federation/pkg/federation-controller/ingress/ingress_controller.go @@ -140,11 +140,11 @@ func NewIngressController(client federationclientset.Interface) *IngressControll // Start informer in federated API servers on ingresses that should be federated. 
ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return client.Extensions().Ingresses(api.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return client.Extensions().Ingresses(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Extensions().Ingresses(api.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Extensions().Ingresses(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1beta1.Ingress{}, @@ -161,11 +161,11 @@ func NewIngressController(client federationclientset.Interface) *IngressControll func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return targetClient.Extensions().Ingresses(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return targetClient.Extensions().Ingresses(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1beta1.Ingress{}, @@ -194,13 +194,13 @@ func NewIngressController(client federationclientset.Interface) *IngressControll glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name) return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { if targetClient == nil { 
glog.Errorf("Internal error: targetClient is nil") } return targetClient.Core().ConfigMaps(uidConfigMapNamespace).List(options) // we only want to list one by name - unfortunately Kubernetes don't have a selector for that. }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if targetClient == nil { glog.Errorf("Internal error: targetClient is nil") } diff --git a/federation/pkg/federation-controller/namespace/BUILD b/federation/pkg/federation-controller/namespace/BUILD index 2607e5e179f..94aeb77d63a 100644 --- a/federation/pkg/federation-controller/namespace/BUILD +++ b/federation/pkg/federation-controller/namespace/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/controller:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/watch", "//vendor:k8s.io/client-go/pkg/util/flowcontrol", diff --git a/federation/pkg/federation-controller/namespace/namespace_controller.go b/federation/pkg/federation-controller/namespace/namespace_controller.go index 6671a2906f6..ef15f153500 100644 --- a/federation/pkg/federation-controller/namespace/namespace_controller.go +++ b/federation/pkg/federation-controller/namespace/namespace_controller.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/pkg/util/flowcontrol" @@ -102,10 +103,10 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont // Start informer in federated API servers on namespaces that should be federated. 
nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return client.Core().Namespaces().List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return client.Core().Namespaces().Watch(options) }, }, @@ -119,10 +120,10 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return targetClient.Core().Namespaces().List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return targetClient.Core().Namespaces().Watch(options) }, }, @@ -491,31 +492,31 @@ func (nc *NamespaceController) removeKubernetesFinalizer(namespace *apiv1.Namesp // Right now there are just 7 types of objects: Deployments, DaemonSets, ReplicaSet, Secret, Ingress, Events and Service. // Temporarily these items are simply deleted one by one to squeeze this code into 1.4. // TODO: Make it generic (like in the regular namespace controller) and parallel. 
- err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete service list: %v", err) } - err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete replicaset list from namespace: %v", err) } - err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete secret list from namespace: %v", err) } - err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete ingresses list from namespace: %v", err) } - err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete daemonsets list from namespace: %v", err) } - err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = 
nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete deployments list from namespace: %v", err) } - err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{}) + err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to delete events list from namespace: %v", err) } diff --git a/federation/pkg/federation-controller/replicaset/BUILD b/federation/pkg/federation-controller/replicaset/BUILD index 6e7bb078b27..ad6d1658514 100644 --- a/federation/pkg/federation-controller/replicaset/BUILD +++ b/federation/pkg/federation-controller/replicaset/BUILD @@ -32,6 +32,7 @@ go_library( "//pkg/util/workqueue:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/federation/pkg/federation-controller/replicaset/replicasetcontroller.go b/federation/pkg/federation-controller/replicaset/replicasetcontroller.go index cf26a54d4c6..5cf62cf62de 100644 --- a/federation/pkg/federation-controller/replicaset/replicasetcontroller.go +++ b/federation/pkg/federation-controller/replicaset/replicasetcontroller.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -125,11 +126,11 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, 
cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return clientset.Extensions().ReplicaSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.ReplicaSet{}, @@ -152,11 +153,11 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return clientset.Core().Pods(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return clientset.Core().Pods(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Core().Pods(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Pod{}, @@ -172,11 +173,11 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe frsc.replicaSetStore.Indexer, frsc.replicaSetController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return 
frsc.fedClient.Extensions().ReplicaSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return frsc.fedClient.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options) }, }, &extensionsv1.ReplicaSet{}, diff --git a/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go b/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go index bfdbaec9242..3d94ae2aabd 100644 --- a/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go +++ b/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go @@ -119,30 +119,30 @@ func TestReplicaSetController(t *testing.T) { go replicaSetController.Run(1, stopChan) rs := newReplicaSetWithReplicas("rs", 9) - rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Create(rs) + rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Create(rs) fedrswatch.Add(rs) time.Sleep(1 * time.Second) - rs1, _ := kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs1, _ := kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) kube1rswatch.Add(rs1) rs1.Status.Replicas = *rs1.Spec.Replicas rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas rs1.Status.ReadyReplicas = *rs1.Spec.Replicas rs1.Status.AvailableReplicas = *rs1.Spec.Replicas - rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1) + rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs1) kube1rswatch.Modify(rs1) - rs2, _ := kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs2, _ := 
kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) kube2rswatch.Add(rs2) rs2.Status.Replicas = *rs2.Spec.Replicas rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas rs2.Status.ReadyReplicas = *rs2.Spec.Replicas rs2.Status.AvailableReplicas = *rs2.Spec.Replicas - rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2) + rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs2) kube2rswatch.Modify(rs2) time.Sleep(1 * time.Second) - rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas) assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas) assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas) @@ -151,28 +151,28 @@ func TestReplicaSetController(t *testing.T) { var replicas int32 = 20 rs.Spec.Replicas = &replicas - rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Update(rs) + rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Update(rs) fedrswatch.Modify(rs) time.Sleep(1 * time.Second) - rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) rs1.Status.Replicas = *rs1.Spec.Replicas rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas rs1.Status.ReadyReplicas = *rs1.Spec.Replicas rs1.Status.AvailableReplicas = *rs1.Spec.Replicas - rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1) + rs1, _ = kube1clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs1) kube1rswatch.Modify(rs1) - rs2, _ = 
kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) rs2.Status.Replicas = *rs2.Spec.Replicas rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas rs2.Status.ReadyReplicas = *rs2.Spec.Replicas rs2.Status.AvailableReplicas = *rs2.Spec.Replicas - rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2) + rs2, _ = kube2clientset.Extensions().ReplicaSets(metav1.NamespaceDefault).UpdateStatus(rs2) kube2rswatch.Modify(rs2) time.Sleep(1 * time.Second) - rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) + rs, _ = fedclientset.Extensions().ReplicaSets(metav1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{}) assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas) assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas) assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas) @@ -184,7 +184,7 @@ func newReplicaSetWithReplicas(name string, replicas int32) *extensionsv1.Replic return &extensionsv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: apiv1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, SelfLink: "/api/v1/namespaces/default/replicasets/name", }, Spec: extensionsv1.ReplicaSetSpec{ diff --git a/federation/pkg/federation-controller/secret/BUILD b/federation/pkg/federation-controller/secret/BUILD index a2177d44b20..89b84628ca9 100644 --- a/federation/pkg/federation-controller/secret/BUILD +++ b/federation/pkg/federation-controller/secret/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/controller:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", 
"//vendor:k8s.io/apimachinery/pkg/types", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/federation/pkg/federation-controller/secret/secret_controller.go b/federation/pkg/federation-controller/secret/secret_controller.go index 339a0660d87..83852bbc94c 100644 --- a/federation/pkg/federation-controller/secret/secret_controller.go +++ b/federation/pkg/federation-controller/secret/secret_controller.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -103,11 +104,11 @@ func NewSecretController(client federationclientset.Interface) *SecretController // Start informer in federated API servers on secrets that should be federated. secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return client.Core().Secrets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return client.Core().Secrets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return client.Core().Secrets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().Secrets(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Secret{}, @@ -120,11 +121,11 @@ func NewSecretController(client federationclientset.Interface) *SecretController func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { - return targetClient.Core().Secrets(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + 
return targetClient.Core().Secrets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return targetClient.Core().Secrets(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return targetClient.Core().Secrets(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Secret{}, diff --git a/federation/pkg/federation-controller/service/cluster_helper.go b/federation/pkg/federation-controller/service/cluster_helper.go index 09d52e237ba..3e4f437581f 100644 --- a/federation/pkg/federation-controller/service/cluster_helper.go +++ b/federation/pkg/federation-controller/service/cluster_helper.go @@ -19,6 +19,7 @@ package service import ( "sync" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -92,11 +93,11 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa } cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return clientset.Core().Endpoints(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return clientset.Core().Endpoints(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return clientset.Core().Endpoints(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Core().Endpoints(metav1.NamespaceAll).Watch(options) }, }, &v1.Endpoints{}, @@ -116,11 +117,11 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) 
(pkgruntime.Object, error) { - return clientset.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return clientset.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return clientset.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, diff --git a/federation/pkg/federation-controller/service/servicecontroller.go b/federation/pkg/federation-controller/service/servicecontroller.go index 55a7adeb5ce..9b06aafb05c 100644 --- a/federation/pkg/federation-controller/service/servicecontroller.go +++ b/federation/pkg/federation-controller/service/servicecontroller.go @@ -181,11 +181,11 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface, s.clusterDeliverer = util.NewDelayingDeliverer() s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return s.federationClient.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return s.federationClient.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return s.federationClient.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return s.federationClient.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, @@ -204,10 +204,10 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface, ) s.clusterStore.Store, s.clusterController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { + ListFunc: 
func(options metav1.ListOptions) (pkgruntime.Object, error) { return s.federationClient.Federation().Clusters().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return s.federationClient.Federation().Clusters().Watch(options) }, }, @@ -249,11 +249,11 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface, fedInformerFactory := func(cluster *v1beta1.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return targetClient.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return targetClient.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return targetClient.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return targetClient.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, diff --git a/federation/pkg/federation-controller/util/BUILD b/federation/pkg/federation-controller/util/BUILD index 936d08be354..ee4c9376264 100644 --- a/federation/pkg/federation-controller/util/BUILD +++ b/federation/pkg/federation-controller/util/BUILD @@ -21,7 +21,6 @@ go_library( "handlers.go", "meta.go", "secret.go", - "versionize_listoptions.go", ], tags = ["automanaged"], deps = [ diff --git a/federation/pkg/federation-controller/util/deployment_test.go b/federation/pkg/federation-controller/util/deployment_test.go index 9d83b5c1e6c..4550c6728cc 100644 --- a/federation/pkg/federation-controller/util/deployment_test.go +++ b/federation/pkg/federation-controller/util/deployment_test.go @@ -20,7 +20,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiv1 
"k8s.io/kubernetes/pkg/api/v1" extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" deputils "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -61,7 +60,7 @@ func newDeployment() *extensionsv1.Deployment { return &extensionsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "wrr", - Namespace: apiv1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, SelfLink: "/api/v1/namespaces/default/deployments/name123", }, Spec: extensionsv1.DeploymentSpec{ diff --git a/federation/pkg/federation-controller/util/federated_informer.go b/federation/pkg/federation-controller/util/federated_informer.go index 11ddb24ba77..5e1e465378e 100644 --- a/federation/pkg/federation-controller/util/federated_informer.go +++ b/federation/pkg/federation-controller/util/federated_informer.go @@ -22,6 +22,7 @@ import ( "sync" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" @@ -160,10 +161,10 @@ func NewFederatedInformer( federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) { + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { return federationClient.Federation().Clusters().List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return federationClient.Federation().Clusters().Watch(options) }, }, diff --git a/federation/pkg/federation-controller/util/federated_informer_test.go b/federation/pkg/federation-controller/util/federated_informer_test.go index 0e64b6d2665..2898b348da4 100644 --- a/federation/pkg/federation-controller/util/federated_informer_test.go +++ b/federation/pkg/federation-controller/util/federated_informer_test.go @@ -81,11 +81,11 @@ func TestFederatedInformer(t *testing.T) { 
targetInformerFactory := func(cluster *federationapi.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) { - return clientset.Core().Services(apiv1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return clientset.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) { - return clientset.Core().Services(apiv1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &apiv1.Service{}, diff --git a/federation/pkg/federation-controller/util/versionize_listoptions.go b/federation/pkg/federation-controller/util/versionize_listoptions.go deleted file mode 100644 index 1f6ef7d34ab..00000000000 --- a/federation/pkg/federation-controller/util/versionize_listoptions.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" -) - -// TODO: remove this when Reflector takes an interface rather than a particular ListOptions as input parameter. 
-func VersionizeV1ListOptions(in api.ListOptions) (out v1.ListOptions) { - if in.LabelSelector != nil { - out.LabelSelector = in.LabelSelector.String() - } else { - out.LabelSelector = "" - } - if in.FieldSelector != nil { - out.FieldSelector = in.FieldSelector.String() - } else { - out.FieldSelector = "" - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - return out -} diff --git a/federation/pkg/kubefed/init/init.go b/federation/pkg/kubefed/init/init.go index f2d793b2ffb..609b709f459 100644 --- a/federation/pkg/kubefed/init/init.go +++ b/federation/pkg/kubefed/init/init.go @@ -592,7 +592,7 @@ func createControllerManager(clientset *client.Clientset, namespace, name, svcNa func waitForPods(clientset *client.Clientset, fedPods []string, namespace string) error { err := wait.PollInfinite(podWaitInterval, func() (bool, error) { podCheck := len(fedPods) - podList, err := clientset.Core().Pods(namespace).List(api.ListOptions{}) + podList, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{}) if err != nil { return false, nil } diff --git a/federation/registry/cluster/BUILD b/federation/registry/cluster/BUILD index 23e31dbb31e..0b44bbbecfd 100644 --- a/federation/registry/cluster/BUILD +++ b/federation/registry/cluster/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/api/v1/BUILD b/pkg/api/v1/BUILD index 25b2f03b3ee..d74dcf7a5bb 100644 --- a/pkg/api/v1/BUILD +++ b/pkg/api/v1/BUILD @@ -42,7 +42,6 @@ go_library( "//vendor:k8s.io/apimachinery/pkg/api/meta", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", 
"//vendor:k8s.io/apimachinery/pkg/conversion", - "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", diff --git a/pkg/api/validation/events.go b/pkg/api/validation/events.go index ac5cebfb34d..a255f58e2b9 100644 --- a/pkg/api/validation/events.go +++ b/pkg/api/validation/events.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -34,7 +35,7 @@ func ValidateEvent(event *api.Event) field.ErrorList { // Make sure event.Namespace and the involvedObject.Namespace agree if len(event.InvolvedObject.Namespace) == 0 { // event.Namespace must also be empty (or "default", for compatibility with old clients) - if event.Namespace != api.NamespaceNone && event.Namespace != api.NamespaceDefault { + if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) } } else { diff --git a/pkg/api/validation/events_test.go b/pkg/api/validation/events_test.go index 870f41b4382..0df784f6dff 100644 --- a/pkg/api/validation/events_test.go +++ b/pkg/api/validation/events_test.go @@ -56,7 +56,7 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test3", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "v1", @@ -68,7 +68,7 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test4", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "v1", @@ -80,12 +80,12 @@ func 
TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test5", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "extensions/v1beta1", Kind: "NoKind", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, }, true, @@ -93,7 +93,7 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test6", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "extensions/v1beta1", @@ -106,12 +106,12 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test7", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "extensions/v1beta1", Kind: "Job", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, }, true, @@ -119,7 +119,7 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test8", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "other/v1beta1", @@ -145,7 +145,7 @@ func TestValidateEvent(t *testing.T) { &api.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "test10", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, InvolvedObject: api.ObjectReference{ APIVersion: "extensions", diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 3a0e3854c61..8e5382b1c8b 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -5243,7 +5243,7 @@ func TestValidateReplicationControllerStatusUpdate(t *testing.T) { successCases := []rcUpdateTest{ { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: 
metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, @@ -5253,7 +5253,7 @@ func TestValidateReplicationControllerStatusUpdate(t *testing.T) { }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 3, Selector: validSelector, @@ -5275,7 +5275,7 @@ func TestValidateReplicationControllerStatusUpdate(t *testing.T) { errorCases := map[string]rcUpdateTest{ "negative replicas": { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, @@ -5285,7 +5285,7 @@ func TestValidateReplicationControllerStatusUpdate(t *testing.T) { }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 2, Selector: validSelector, @@ -5351,14 +5351,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { successCases := []rcUpdateTest{ { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 3, Selector: validSelector, @@ -5368,14 
+5368,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { }, { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 1, Selector: validSelector, @@ -5394,14 +5394,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { errorCases := map[string]rcUpdateTest{ "more than one read/write": { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 2, Selector: validSelector, @@ -5411,14 +5411,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { }, "invalid selector": { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: 
api.ReplicationControllerSpec{ Replicas: 2, Selector: invalidSelector, @@ -5428,14 +5428,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { }, "invalid pod": { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 2, Selector: validSelector, @@ -5445,14 +5445,14 @@ func TestValidateReplicationControllerUpdate(t *testing.T) { }, "negative replicas": { old: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, update: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: -1, Selector: validSelector, @@ -5509,21 +5509,21 @@ func TestValidateReplicationController(t *testing.T) { } successCases := []api.ReplicationController{ { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: 
api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: 1, Selector: validSelector, @@ -5539,7 +5539,7 @@ func TestValidateReplicationController(t *testing.T) { errorCases := map[string]api.ReplicationController{ "zero-length ID": { - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, @@ -5553,20 +5553,20 @@ func TestValidateReplicationController(t *testing.T) { }, }, "empty selector": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Template: &validPodTemplate.Template, }, }, "selector_doesnt_match": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: map[string]string{"foo": "bar"}, Template: &validPodTemplate.Template, }, }, "invalid manifest": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, }, @@ -5580,7 +5580,7 @@ func TestValidateReplicationController(t *testing.T) { }, }, "negative_replicas": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Replicas: -1, Selector: validSelector, 
@@ -5589,7 +5589,7 @@ func TestValidateReplicationController(t *testing.T) { "invalid_label": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -5602,7 +5602,7 @@ func TestValidateReplicationController(t *testing.T) { "invalid_label 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -5614,7 +5614,7 @@ func TestValidateReplicationController(t *testing.T) { "invalid_annotation": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -5627,7 +5627,7 @@ func TestValidateReplicationController(t *testing.T) { "invalid restart policy 1": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.ReplicationControllerSpec{ Selector: validSelector, @@ -5646,7 +5646,7 @@ func TestValidateReplicationController(t *testing.T) { "invalid restart policy 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.ReplicationControllerSpec{ Selector: validSelector, @@ -8522,7 +8522,7 @@ func newNodeNameEndpoint(nodeName string) *api.Endpoints { ep := &api.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{ diff --git a/pkg/apis/apps/validation/validation_test.go b/pkg/apis/apps/validation/validation_test.go index 66094b9ce1f..6efff4b552d 100644 --- a/pkg/apis/apps/validation/validation_test.go +++ b/pkg/apis/apps/validation/validation_test.go @@ 
-53,14 +53,14 @@ func TestValidateStatefulSet(t *testing.T) { } successCases := []apps.StatefulSet{ { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -75,7 +75,7 @@ func TestValidateStatefulSet(t *testing.T) { errorCases := map[string]apps.StatefulSet{ "zero-length ID": { - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -89,26 +89,26 @@ func TestValidateStatefulSet(t *testing.T) { }, }, "empty selector": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Template: validPodTemplate.Template, }, }, "selector_doesnt_match": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: validPodTemplate.Template, }, }, "invalid manifest": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: 
&metav1.LabelSelector{MatchLabels: validLabels}, }, }, "negative_replicas": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: -1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -117,7 +117,7 @@ func TestValidateStatefulSet(t *testing.T) { "invalid_label": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -130,7 +130,7 @@ func TestValidateStatefulSet(t *testing.T) { "invalid_label 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -142,7 +142,7 @@ func TestValidateStatefulSet(t *testing.T) { "invalid_annotation": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -155,7 +155,7 @@ func TestValidateStatefulSet(t *testing.T) { "invalid restart policy 1": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -174,7 +174,7 @@ func TestValidateStatefulSet(t *testing.T) { "invalid restart policy 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -261,14 +261,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { successCases := []psUpdateTest{ { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: 
api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: 3, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -287,14 +287,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { errorCases := map[string]psUpdateTest{ "more than one read/write": { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -304,14 +304,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, "updates to a field other than spec.Replicas": { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: 1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -321,14 
+321,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, "invalid selector": { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: invalidLabels}, @@ -338,14 +338,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, "invalid pod": { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -355,14 +355,14 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, "negative replicas": { old: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ 
Replicas: -1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, diff --git a/pkg/apis/autoscaling/validation/validation_test.go b/pkg/apis/autoscaling/validation/validation_test.go index cd57ac72938..9c7acdf6a2c 100644 --- a/pkg/apis/autoscaling/validation/validation_test.go +++ b/pkg/apis/autoscaling/validation/validation_test.go @@ -21,7 +21,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/controller/podautoscaler" ) @@ -31,7 +30,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.ScaleSpec{ Replicas: 1, @@ -40,7 +39,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.ScaleSpec{ Replicas: 10, @@ -49,7 +48,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.ScaleSpec{ Replicas: 0, @@ -71,7 +70,7 @@ func TestValidateScale(t *testing.T) { scale: autoscaling.Scale{ ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.ScaleSpec{ Replicas: -1, @@ -95,7 +94,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{ @@ -110,7 +109,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, 
Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{ @@ -124,7 +123,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"name\":\"qps\",\"value\":\"20\"}]}", }, @@ -151,7 +150,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { }{ { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: metav1.NamespaceDefault}, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{Name: "myrc"}, MinReplicas: newInt32(1), @@ -163,7 +162,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { }, { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: metav1.NamespaceDefault}, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "..", Name: "myrc"}, MinReplicas: newInt32(1), @@ -175,7 +174,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { }, { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: metav1.NamespaceDefault}, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "ReplicationController"}, MinReplicas: newInt32(1), @@ -187,7 +186,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { }, { horizontalPodAutoscaler: 
autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "myautoscaler", Namespace: metav1.NamespaceDefault}, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "ReplicationController", Name: ".."}, MinReplicas: newInt32(1), @@ -201,7 +200,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, @@ -215,7 +214,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, @@ -229,7 +228,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, @@ -244,7 +243,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ podautoscaler.HpaCustomMetricsTargetAnnotationName: "broken", }, @@ -264,7 +263,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: 
autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ podautoscaler.HpaCustomMetricsTargetAnnotationName: "{}", }, @@ -284,7 +283,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"value\":\"20\"}]}", }, @@ -304,7 +303,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "myautoscaler", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"name\":\"qps\",\"value\":\"0\"}]}", }, diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index b28ddd30d2d..0e60991f423 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -74,7 +74,7 @@ func TestValidateJob(t *testing.T) { "manual selector": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -86,7 +86,7 @@ func TestValidateJob(t *testing.T) { "generated selector": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -106,7 +106,7 @@ func TestValidateJob(t *testing.T) { "spec.parallelism:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: 
metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -118,7 +118,7 @@ func TestValidateJob(t *testing.T) { "spec.completions:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -130,7 +130,7 @@ func TestValidateJob(t *testing.T) { "spec.activeDeadlineSeconds:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -142,7 +142,7 @@ func TestValidateJob(t *testing.T) { "spec.selector:Required value": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -152,7 +152,7 @@ func TestValidateJob(t *testing.T) { "spec.template.metadata.labels: Invalid value: {\"y\":\"z\"}: `selector` does not match template `labels`": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -173,7 +173,7 @@ func TestValidateJob(t *testing.T) { "spec.template.metadata.labels: Invalid value: {\"controller-uid\":\"4d5e6f\"}: `selector` does not match template `labels`": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -194,7 +194,7 @@ func TestValidateJob(t *testing.T) { "spec.template.spec.restartPolicy: Unsupported value": { ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.JobSpec{ @@ -237,7 +237,7 @@ func TestValidateJobUpdateStatus(t *testing.T) { successCases := []testcase{ { old: batch.Job{ 
- ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Status: batch.JobStatus{ Active: 1, Succeeded: 2, @@ -245,7 +245,7 @@ func TestValidateJobUpdateStatus(t *testing.T) { }, }, update: batch.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Status: batch.JobStatus{ Active: 1, Succeeded: 1, @@ -268,7 +268,7 @@ func TestValidateJobUpdateStatus(t *testing.T) { old: batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: batch.JobStatus{ @@ -280,7 +280,7 @@ func TestValidateJobUpdateStatus(t *testing.T) { update: batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: batch.JobStatus{ @@ -313,7 +313,7 @@ func TestValidateCronJob(t *testing.T) { "basic scheduled job": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -329,7 +329,7 @@ func TestValidateCronJob(t *testing.T) { "non-standard scheduled": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -356,7 +356,7 @@ func TestValidateCronJob(t *testing.T) { "spec.schedule: Invalid value": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -372,7 +372,7 @@ func TestValidateCronJob(t *testing.T) { "spec.schedule: Required value": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: 
api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -388,7 +388,7 @@ func TestValidateCronJob(t *testing.T) { "spec.startingDeadlineSeconds:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -405,7 +405,7 @@ func TestValidateCronJob(t *testing.T) { "spec.concurrencyPolicy: Required value": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -420,7 +420,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.parallelism:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -437,7 +437,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.completions:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -455,7 +455,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.activeDeadlineSeconds:must be greater than or equal to 0": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -472,7 +472,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.selector: Invalid value: {\"matchLabels\":{\"a\":\"b\"}}: `selector` will be auto-generated": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: 
batch.CronJobSpec{ @@ -489,7 +489,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.manualSelector: Unsupported value": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ @@ -506,7 +506,7 @@ func TestValidateCronJob(t *testing.T) { "spec.jobTemplate.spec.template.spec.restartPolicy: Unsupported value": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go index 78a32975a90..89e44f10eb7 100644 --- a/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -277,7 +277,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { obj.LowDiskSpaceThresholdMB = 256 } if obj.MasterServiceNamespace == "" { - obj.MasterServiceNamespace = api.NamespaceDefault + obj.MasterServiceNamespace = metav1.NamespaceDefault } if obj.MaxContainerCount == nil { temp := int32(-1) diff --git a/pkg/apis/extensions/validation/validation_test.go b/pkg/apis/extensions/validation/validation_test.go index 4ee028c4ce2..3dcc4653940 100644 --- a/pkg/apis/extensions/validation/validation_test.go +++ b/pkg/apis/extensions/validation/validation_test.go @@ -40,7 +40,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { successCases := []dsUpdateTest{ { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Status: extensions.DaemonSetStatus{ CurrentNumberScheduled: 1, NumberMisscheduled: 2, @@ -49,7 +49,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: 
"abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Status: extensions.DaemonSetStatus{ CurrentNumberScheduled: 1, NumberMisscheduled: 1, @@ -72,7 +72,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -86,7 +86,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -102,7 +102,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -116,7 +116,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -132,7 +132,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -146,7 +146,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -162,7 +162,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: 
extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -176,7 +176,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -192,7 +192,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -206,7 +206,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -222,7 +222,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { old: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -236,7 +236,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) { update: extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Status: extensions.DaemonSetStatus{ @@ -345,14 +345,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { successCases := []dsUpdateTest{ { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: 
validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, @@ -361,14 +361,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector2}, Template: validPodTemplateAbc2.Template, @@ -377,14 +377,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateNodeSelector.Template, @@ -402,14 +402,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { errorCases := map[string]dsUpdateTest{ "change daemon name": { old: 
extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, @@ -418,14 +418,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, "invalid selector": { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: invalidSelector}, Template: validPodTemplateAbc.Template, @@ -434,14 +434,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, "invalid pod": { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: 
extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: invalidPodTemplate.Template, @@ -450,14 +450,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, "change container image": { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateDef.Template, @@ -466,14 +466,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, "read-write volume": { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: readWriteVolumePodTemplate.Template, @@ -482,14 +482,14 @@ func TestValidateDaemonSetUpdate(t *testing.T) { }, "invalid update strategy": { old: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, 
Template: validPodTemplateAbc.Template, }, }, update: extensions.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: invalidSelector}, Template: validPodTemplateAbc.Template, @@ -532,14 +532,14 @@ func TestValidateDaemonSet(t *testing.T) { } successCases := []extensions.DaemonSet{ { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, @@ -554,7 +554,7 @@ func TestValidateDaemonSet(t *testing.T) { errorCases := map[string]extensions.DaemonSet{ "zero-length ID": { - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, @@ -568,27 +568,27 @@ func TestValidateDaemonSet(t *testing.T) { }, }, "nil selector": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Template: validPodTemplate.Template, }, }, "empty selector": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, 
Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{}, Template: validPodTemplate.Template, }, }, "selector_doesnt_match": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: validPodTemplate.Template, }, }, "invalid template": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, }, @@ -596,7 +596,7 @@ func TestValidateDaemonSet(t *testing.T) { "invalid_label": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -609,7 +609,7 @@ func TestValidateDaemonSet(t *testing.T) { "invalid_label 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -621,7 +621,7 @@ func TestValidateDaemonSet(t *testing.T) { "invalid_annotation": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -634,7 +634,7 @@ func TestValidateDaemonSet(t *testing.T) { "invalid restart policy 1": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, @@ -653,7 +653,7 @@ func TestValidateDaemonSet(t *testing.T) { "invalid restart policy 2": { ObjectMeta: 
metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, @@ -697,7 +697,7 @@ func validDeployment() *extensions.Deployment { return &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.DeploymentSpec{ Selector: &metav1.LabelSelector{ @@ -715,7 +715,7 @@ func validDeployment() *extensions.Deployment { Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "name": "abc", }, @@ -753,7 +753,7 @@ func TestValidateDeployment(t *testing.T) { errorCases := map[string]*extensions.Deployment{} errorCases["metadata.name: Required value"] = &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, } // selector should match the labels in pod template. 
@@ -888,7 +888,7 @@ func TestValidateIngress(t *testing.T) { return extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.IngressSpec{ Backend: &extensions.IngressBackend{ @@ -992,7 +992,7 @@ func TestValidateIngressTLS(t *testing.T) { return extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.IngressSpec{ Backend: &extensions.IngressBackend{ @@ -1062,7 +1062,7 @@ func TestValidateIngressStatusUpdate(t *testing.T) { return extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "9", }, Spec: extensions.IngressSpec{ @@ -1149,7 +1149,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: 1, @@ -1158,7 +1158,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: 10, @@ -1167,7 +1167,7 @@ func TestValidateScale(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: 0, @@ -1189,7 +1189,7 @@ func TestValidateScale(t *testing.T) { scale: extensions.Scale{ ObjectMeta: metav1.ObjectMeta{ Name: "frontend", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: -1, @@ -1229,7 +1229,7 @@ func TestValidateReplicaSetStatusUpdate(t *testing.T) { successCases := []rcUpdateTest{ { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + 
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -1239,7 +1239,7 @@ func TestValidateReplicaSetStatusUpdate(t *testing.T) { }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 3, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1261,7 +1261,7 @@ func TestValidateReplicaSetStatusUpdate(t *testing.T) { errorCases := map[string]rcUpdateTest{ "negative replicas": { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -1271,7 +1271,7 @@ func TestValidateReplicaSetStatusUpdate(t *testing.T) { }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1337,14 +1337,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { successCases := []rcUpdateTest{ { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: 
metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 3, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1354,14 +1354,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { }, { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1380,14 +1380,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { errorCases := map[string]rcUpdateTest{ "more than one read/write": { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1397,14 +1397,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { }, "invalid selector": { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: 
validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: invalidLabels}, @@ -1414,14 +1414,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { }, "invalid pod": { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 2, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1431,14 +1431,14 @@ func TestValidateReplicaSetUpdate(t *testing.T) { }, "negative replicas": { old: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, update: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: -1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1495,21 +1495,21 @@ func TestValidateReplicaSet(t *testing.T) { } successCases := []extensions.ReplicaSet{ { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: 
metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: 1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1525,7 +1525,7 @@ func TestValidateReplicaSet(t *testing.T) { errorCases := map[string]extensions.ReplicaSet{ "zero-length ID": { - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -1539,20 +1539,20 @@ func TestValidateReplicaSet(t *testing.T) { }, }, "empty selector": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Template: validPodTemplate.Template, }, }, "selector_doesnt_match": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: validPodTemplate.Template, }, }, "invalid manifest": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: 
metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, }, @@ -1566,7 +1566,7 @@ func TestValidateReplicaSet(t *testing.T) { }, }, "negative_replicas": { - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Replicas: -1, Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1575,7 +1575,7 @@ func TestValidateReplicaSet(t *testing.T) { "invalid_label": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -1588,7 +1588,7 @@ func TestValidateReplicaSet(t *testing.T) { "invalid_label 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -1600,7 +1600,7 @@ func TestValidateReplicaSet(t *testing.T) { "invalid_annotation": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, @@ -1613,7 +1613,7 @@ func TestValidateReplicaSet(t *testing.T) { "invalid restart policy 1": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validLabels}, @@ -1632,7 +1632,7 @@ func TestValidateReplicaSet(t *testing.T) { "invalid restart policy 2": { ObjectMeta: metav1.ObjectMeta{ Name: "abc-123", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: 
validLabels}, diff --git a/pkg/apis/rbac/validation/validation_test.go b/pkg/apis/rbac/validation/validation_test.go index 92615af4e01..25416b009ca 100644 --- a/pkg/apis/rbac/validation/validation_test.go +++ b/pkg/apis/rbac/validation/validation_test.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" ) @@ -143,7 +142,7 @@ func TestValidateClusterRoleBinding(t *testing.T) { func TestValidateRoleBinding(t *testing.T) { errs := ValidateRoleBinding( &rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, Subjects: []rbac.Subject{ {Name: "validsaname", Kind: rbac.ServiceAccountKind}, @@ -163,7 +162,7 @@ func TestValidateRoleBinding(t *testing.T) { }{ "bad group": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "default"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "default"}, RoleRef: rbac.RoleRef{APIGroup: "rbac.GroupName", Kind: "ClusterRole", Name: "valid"}, }, T: field.ErrorTypeNotSupported, @@ -171,7 +170,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "bad kind": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "default"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "default"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Type", Name: "valid"}, }, T: field.ErrorTypeNotSupported, @@ -187,7 +186,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "zero-length name": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}, RoleRef: rbac.RoleRef{APIGroup: 
rbac.GroupName, Kind: "Role", Name: "valid"}, }, T: field.ErrorTypeRequired, @@ -195,7 +194,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "bad role": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "default"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "default"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role"}, }, T: field.ErrorTypeRequired, @@ -203,7 +202,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "bad subject kind": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, Subjects: []rbac.Subject{{Name: "subject"}}, }, @@ -212,7 +211,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "bad subject name": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, Subjects: []rbac.Subject{{Name: "subject:bad", Kind: rbac.ServiceAccountKind}}, }, @@ -221,7 +220,7 @@ func TestValidateRoleBinding(t *testing.T) { }, "missing subject name": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, Subjects: []rbac.Subject{{Kind: rbac.ServiceAccountKind}}, }, @@ -248,13 +247,13 @@ func TestValidateRoleBinding(t *testing.T) { func TestValidateRoleBindingUpdate(t *testing.T) { old := &rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + ObjectMeta: metav1.ObjectMeta{Namespace: 
metav1.NamespaceDefault, Name: "master", ResourceVersion: "1"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, } errs := ValidateRoleBindingUpdate( &rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master", ResourceVersion: "1"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "valid"}, }, old, @@ -270,7 +269,7 @@ func TestValidateRoleBindingUpdate(t *testing.T) { }{ "changedRef": { A: rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "master", ResourceVersion: "1"}, RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "changed"}, }, T: field.ErrorTypeInvalid, diff --git a/pkg/client/cache/BUILD b/pkg/client/cache/BUILD index eb5f74a8abb..6755a02763a 100644 --- a/pkg/client/cache/BUILD +++ b/pkg/client/cache/BUILD @@ -35,6 +35,7 @@ go_library( "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/client/cache/controller_test.go b/pkg/client/cache/controller_test.go index 8c51ba7bf1e..542e37db7b3 100644 --- a/pkg/client/cache/controller_test.go +++ b/pkg/client/cache/controller_test.go @@ -351,12 +351,12 @@ func TestUpdate(t *testing.T) { watchCh := make(chan struct{}) _, controller := NewInformer( &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { watch, err := source.Watch(options) close(watchCh) return watch, err }, - ListFunc: 
func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return source.List(options) }, }, diff --git a/pkg/client/cache/listers.go b/pkg/client/cache/listers.go index 8bd708e7c43..27d51a6b387 100644 --- a/pkg/client/cache/listers.go +++ b/pkg/client/cache/listers.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api/v1" ) // AppendFunc is used to add a matching item to whatever list the caller is using @@ -45,7 +44,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { } func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { - if namespace == v1.NamespaceAll { + if namespace == metav1.NamespaceAll { for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { diff --git a/pkg/client/cache/listwatch.go b/pkg/client/cache/listwatch.go index 22d4307c926..1261e758169 100644 --- a/pkg/client/cache/listwatch.go +++ b/pkg/client/cache/listwatch.go @@ -25,8 +25,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" ) // ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. 
diff --git a/pkg/client/cache/listwatch_test.go b/pkg/client/cache/listwatch_test.go index 384eeeb707d..b607f965677 100644 --- a/pkg/client/cache/listwatch_test.go +++ b/pkg/client/cache/listwatch_test.go @@ -70,18 +70,18 @@ func TestListWatchesCanList(t *testing.T) { }{ // Node { - location: testapi.Default.ResourcePath("nodes", v1.NamespaceAll, ""), + location: testapi.Default.ResourcePath("nodes", metav1.NamespaceAll, ""), resource: "nodes", - namespace: v1.NamespaceAll, + namespace: metav1.NamespaceAll, fieldSelector: parseSelectorOrDie(""), }, // pod with "assigned" field selector. { location: buildLocation( - testapi.Default.ResourcePath("pods", v1.NamespaceAll, ""), + testapi.Default.ResourcePath("pods", metav1.NamespaceAll, ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})), resource: "pods", - namespace: v1.NamespaceAll, + namespace: metav1.NamespaceAll, fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), }, // pod in namespace "foo" @@ -105,7 +105,7 @@ func TestListWatchesCanList(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. 
- lw.List(v1.ListOptions{}) + lw.List(metav1.ListOptions{}) handler.ValidateRequest(t, item.location, "GET", nil) } } @@ -122,30 +122,30 @@ func TestListWatchesCanWatch(t *testing.T) { // Node { location: buildLocation( - testapi.Default.ResourcePathWithPrefix("watch", "nodes", v1.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "nodes", metav1.NamespaceAll, ""), buildQueryValues(url.Values{})), rv: "", resource: "nodes", - namespace: v1.NamespaceAll, + namespace: metav1.NamespaceAll, fieldSelector: parseSelectorOrDie(""), }, { location: buildLocation( - testapi.Default.ResourcePathWithPrefix("watch", "nodes", v1.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "nodes", metav1.NamespaceAll, ""), buildQueryValues(url.Values{"resourceVersion": []string{"42"}})), rv: "42", resource: "nodes", - namespace: v1.NamespaceAll, + namespace: metav1.NamespaceAll, fieldSelector: parseSelectorOrDie(""), }, // pod with "assigned" field selector. { location: buildLocation( - testapi.Default.ResourcePathWithPrefix("watch", "pods", v1.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "pods", metav1.NamespaceAll, ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})), rv: "0", resource: "pods", - namespace: v1.NamespaceAll, + namespace: metav1.NamespaceAll, fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), }, // pod with namespace foo and assigned field selector @@ -171,7 +171,7 @@ func TestListWatchesCanWatch(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. 
- lw.Watch(v1.ListOptions{ResourceVersion: item.rv}) + lw.Watch(metav1.ListOptions{ResourceVersion: item.rv}) handler.ValidateRequest(t, item.location, "GET", nil) } } @@ -181,11 +181,11 @@ type lw struct { watch watch.Interface } -func (w lw) List(options v1.ListOptions) (runtime.Object, error) { +func (w lw) List(options metav1.ListOptions) (runtime.Object, error) { return w.list, nil } -func (w lw) Watch(options v1.ListOptions) (watch.Interface, error) { +func (w lw) Watch(options metav1.ListOptions) (watch.Interface, error) { return w.watch, nil } diff --git a/pkg/client/cache/mutation_detector_test.go b/pkg/client/cache/mutation_detector_test.go index fbe0a0893ed..66cf36f92ae 100644 --- a/pkg/client/cache/mutation_detector_test.go +++ b/pkg/client/cache/mutation_detector_test.go @@ -31,10 +31,10 @@ import ( func TestMutationDetector(t *testing.T) { fakeWatch := watch.NewFake() lw := &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return fakeWatch, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &v1.PodList{}, nil }, } diff --git a/pkg/client/cache/reflector_test.go b/pkg/client/cache/reflector_test.go index c34032203ca..70ce055e00e 100644 --- a/pkg/client/cache/reflector_test.go +++ b/pkg/client/cache/reflector_test.go @@ -34,14 +34,14 @@ import ( var nevererrc chan error type testLW struct { - ListFunc func(options v1.ListOptions) (runtime.Object, error) - WatchFunc func(options v1.ListOptions) (watch.Interface, error) + ListFunc func(options metav1.ListOptions) (runtime.Object, error) + WatchFunc func(options metav1.ListOptions) (watch.Interface, error) } -func (t *testLW) List(options v1.ListOptions) (runtime.Object, error) { +func (t *testLW) List(options metav1.ListOptions) (runtime.Object, error) { return t.ListFunc(options) } -func (t *testLW) 
Watch(options v1.ListOptions) (watch.Interface, error) { +func (t *testLW) Watch(options metav1.ListOptions) (watch.Interface, error) { return t.WatchFunc(options) } @@ -50,10 +50,10 @@ func TestCloseWatchChannelOnError(t *testing.T) { pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}} fw := watch.NewFake() r.listerWatcher = &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil }, } @@ -76,10 +76,10 @@ func TestRunUntil(t *testing.T) { r := NewReflector(&testLW{}, &v1.Pod{}, store, 0) fw := watch.NewFake() r.listerWatcher = &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil }, } @@ -215,7 +215,7 @@ func TestReflectorListAndWatch(t *testing.T) { // inject an error. 
expectedRVs := []string{"1", "3"} lw := &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { rv := options.ResourceVersion fw := watch.NewFake() if e, a := expectedRVs[0], rv; e != a { @@ -227,7 +227,7 @@ func TestReflectorListAndWatch(t *testing.T) { go func() { createdFakes <- fw }() return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil }, } @@ -331,7 +331,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) { } watchRet, watchErr := item.events, item.watchErr lw := &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if watchErr != nil { return nil, watchErr } @@ -345,7 +345,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) { }() return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return item.list, item.listErr }, } @@ -369,11 +369,11 @@ func TestReflectorResync(t *testing.T) { } lw := &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { fw := watch.NewFake() return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "0"}}, nil }, } diff --git a/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go b/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go index 5cde1d6f4a1..d6ac1b57540 100644 --- a/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go +++ 
b/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go @@ -19,6 +19,7 @@ package v1 import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -117,7 +118,7 @@ func (e *events) Search(objOrRef runtime.Object) (*v1.EventList, error) { refUID = &stringRefUID } fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(v1.ListOptions{FieldSelector: fieldSelector.String()}) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) } // Returns the appropriate field selector based on the API version being used to communicate with the server. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go index c4e2c0e2a90..6d78f82bc6a 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go @@ -19,6 +19,7 @@ package internalversion import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -117,7 +118,7 @@ func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { refUID = &stringRefUID } fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(api.ListOptions{FieldSelector: fieldSelector}) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) } // Returns the appropriate field selector based on the API version being used to communicate with the server. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go index 772e688f1a2..5d13311418b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event_expansion.go @@ -17,6 +17,7 @@ limitations under the License. package fake import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" @@ -66,9 +67,9 @@ func (c *FakeEvents) PatchWithEventNamespace(event *api.Event, data []byte) (*ap // Search returns a list of events matching the specified object. func (c *FakeEvents) Search(objOrRef runtime.Object) (*api.EventList, error) { - action := core.NewRootListAction(eventsResource, api.ListOptions{}) + action := core.NewRootListAction(eventsResource, metav1.ListOptions{}) if c.ns != "" { - action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) + action = core.NewListAction(eventsResource, c.ns, metav1.ListOptions{}) } obj, err := c.Fake.Invokes(action, &api.EventList{}) if obj == nil { diff --git a/pkg/client/legacylisters/listers_test.go b/pkg/client/legacylisters/listers_test.go index 2c142641c1e..e795ae4fbe6 100644 --- a/pkg/client/legacylisters/listers_test.go +++ b/pkg/client/legacylisters/listers_test.go @@ -144,7 +144,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { }, }, list: func(lister StoreToReplicationControllerLister) ([]*v1.ReplicationController, error) { - return lister.ReplicationControllers(v1.NamespaceAll).List(labels.Set{}.AsSelectorPreValidated()) + return lister.ReplicationControllers(metav1.NamespaceAll).List(labels.Set{}.AsSelectorPreValidated()) }, outRCNames: sets.NewString("hmm", "foo"), }, @@ -539,14 +539,14 @@ 
func TestStoreToPodLister(t *testing.T) { store.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "quux", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{"name": "quux"}, }, }) spl := StoreToPodLister{store} // Verify that we can always look up by Namespace. - defaultPods, err := spl.Pods(v1.NamespaceDefault).List(labels.Set{}.AsSelectorPreValidated()) + defaultPods, err := spl.Pods(metav1.NamespaceDefault).List(labels.Set{}.AsSelectorPreValidated()) if err != nil { t.Errorf("Unexpected error: %v", err) } else if e, a := 1, len(defaultPods); e != a { diff --git a/pkg/client/record/event.go b/pkg/client/record/event.go index e9dc0317c88..cddcbe87ef3 100644 --- a/pkg/client/record/event.go +++ b/pkg/client/record/event.go @@ -298,7 +298,7 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, eventtype, reas t := metav1.Time{Time: recorder.clock.Now()} namespace := ref.Namespace if namespace == "" { - namespace = v1.NamespaceDefault + namespace = metav1.NamespaceDefault } return &v1.Event{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/client/testing/cache/fake_controller_source.go b/pkg/client/testing/cache/fake_controller_source.go index c0d65058234..e75071e3a3a 100644 --- a/pkg/client/testing/cache/fake_controller_source.go +++ b/pkg/client/testing/cache/fake_controller_source.go @@ -163,7 +163,7 @@ func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) { } // List returns a list object, with its resource version set. -func (f *FakeControllerSource) List(options v1.ListOptions) (runtime.Object, error) { +func (f *FakeControllerSource) List(options metav1.ListOptions) (runtime.Object, error) { f.lock.RLock() defer f.lock.RUnlock() list, err := f.getListItemsLocked() @@ -184,7 +184,7 @@ func (f *FakeControllerSource) List(options v1.ListOptions) (runtime.Object, err } // List returns a list object, with its resource version set. 
-func (f *FakePVControllerSource) List(options v1.ListOptions) (runtime.Object, error) { +func (f *FakePVControllerSource) List(options metav1.ListOptions) (runtime.Object, error) { f.lock.RLock() defer f.lock.RUnlock() list, err := f.FakeControllerSource.getListItemsLocked() @@ -205,7 +205,7 @@ func (f *FakePVControllerSource) List(options v1.ListOptions) (runtime.Object, e } // List returns a list object, with its resource version set. -func (f *FakePVCControllerSource) List(options v1.ListOptions) (runtime.Object, error) { +func (f *FakePVCControllerSource) List(options metav1.ListOptions) (runtime.Object, error) { f.lock.RLock() defer f.lock.RUnlock() list, err := f.FakeControllerSource.getListItemsLocked() @@ -227,7 +227,7 @@ func (f *FakePVCControllerSource) List(options v1.ListOptions) (runtime.Object, // Watch returns a watch, which will be pre-populated with all changes // after resourceVersion. -func (f *FakeControllerSource) Watch(options v1.ListOptions) (watch.Interface, error) { +func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interface, error) { f.lock.RLock() defer f.lock.RUnlock() rc, err := strconv.Atoi(options.ResourceVersion) diff --git a/pkg/client/testing/cache/fake_controller_source_test.go b/pkg/client/testing/cache/fake_controller_source_test.go index 2538729f9ed..cbd07b89eae 100644 --- a/pkg/client/testing/cache/fake_controller_source_test.go +++ b/pkg/client/testing/cache/fake_controller_source_test.go @@ -66,13 +66,13 @@ func TestRCNumber(t *testing.T) { source.Modify(pod("foo")) source.Modify(pod("foo")) - w, err := source.Watch(v1.ListOptions{ResourceVersion: "1"}) + w, err := source.Watch(metav1.ListOptions{ResourceVersion: "1"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } go consume(t, w, []string{"2", "3"}, wg) - list, err := source.List(v1.ListOptions{}) + list, err := source.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -80,13 +80,13 @@ func TestRCNumber(t 
*testing.T) { t.Errorf("wanted %v, got %v", e, a) } - w2, err := source.Watch(v1.ListOptions{ResourceVersion: "2"}) + w2, err := source.Watch(metav1.ListOptions{ResourceVersion: "2"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } go consume(t, w2, []string{"3"}, wg) - w3, err := source.Watch(v1.ListOptions{ResourceVersion: "3"}) + w3, err := source.Watch(metav1.ListOptions{ResourceVersion: "3"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } diff --git a/pkg/client/testing/core/actions.go b/pkg/client/testing/core/actions.go index 6bb576498c6..a16c75ec080 100644 --- a/pkg/client/testing/core/actions.go +++ b/pkg/client/testing/core/actions.go @@ -21,12 +21,11 @@ import ( "path" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { @@ -224,22 +223,18 @@ func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { var err error switch t := opts.(type) { - case api.ListOptions: - labelSelector = t.LabelSelector - fieldSelector = t.FieldSelector - resourceVersion = t.ResourceVersion - case v1.ListOptions: + case metav1.ListOptions: labelSelector, err = labels.Parse(t.LabelSelector) if err != nil { - panic(err) + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) } fieldSelector, err = fields.ParseSelector(t.FieldSelector) if err != nil { - panic(err) + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err)) } resourceVersion = t.ResourceVersion default: - panic(fmt.Errorf("expect a ListOptions")) + panic(fmt.Errorf("expect a ListOptions %T", opts)) } if labelSelector == nil { 
labelSelector = labels.Everything() diff --git a/pkg/client/testing/core/fake_test.go b/pkg/client/testing/core/fake_test.go index 72a42dee051..07e5cb5496f 100644 --- a/pkg/client/testing/core/fake_test.go +++ b/pkg/client/testing/core/fake_test.go @@ -35,7 +35,7 @@ func TestFakeClientSetFiltering(t *testing.T) { testSA("nsB", "sa-3"), ) - saList1, err := tc.Core().ServiceAccounts("nsA").List(api.ListOptions{}) + saList1, err := tc.Core().ServiceAccounts("nsA").List(metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -48,7 +48,7 @@ func TestFakeClientSetFiltering(t *testing.T) { } } - saList2, err := tc.Core().ServiceAccounts("nsB").List(api.ListOptions{}) + saList2, err := tc.Core().ServiceAccounts("nsB").List(metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -77,7 +77,7 @@ func TestFakeClientSetFiltering(t *testing.T) { t.Fatalf("Pods.Get: expected nsB/pod-1 not to match, but it matched %s/%s", wrongPod.Namespace, wrongPod.Name) } - allPods, err := tc.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) + allPods, err := tc.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Pods.List: %s", err) } @@ -85,7 +85,7 @@ func TestFakeClientSetFiltering(t *testing.T) { t.Fatalf("Expected %d pods to match, got %d", expected, actual) } - allSAs, err := tc.Core().ServiceAccounts(api.NamespaceAll).List(api.ListOptions{}) + allSAs, err := tc.Core().ServiceAccounts(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -105,7 +105,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Namespaces.Create: %s", err) } - allNS, err := tc.Core().Namespaces().List(api.ListOptions{}) + allNS, err := tc.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { t.Fatalf("Namespaces.List: %s", err) } diff --git a/pkg/client/typed/dynamic/client_test.go b/pkg/client/typed/dynamic/client_test.go index 
6045622fdca..0426d7b1456 100644 --- a/pkg/client/typed/dynamic/client_test.go +++ b/pkg/client/typed/dynamic/client_test.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" restclientwatch "k8s.io/client-go/rest/watch" - "k8s.io/kubernetes/pkg/api/v1" ) func getJSON(version, kind, name string) []byte { @@ -137,7 +136,7 @@ func TestList(t *testing.T) { } defer srv.Close() - got, err := cl.Resource(resource, tc.namespace).List(&v1.ListOptions{}) + got, err := cl.Resource(resource, tc.namespace).List(&metav1.ListOptions{}) if err != nil { t.Errorf("unexpected error when listing %q: %v", tc.name, err) continue @@ -294,7 +293,7 @@ func TestDeleteCollection(t *testing.T) { } defer srv.Close() - err = cl.Resource(resource, tc.namespace).DeleteCollection(nil, &v1.ListOptions{}) + err = cl.Resource(resource, tc.namespace).DeleteCollection(nil, &metav1.ListOptions{}) if err != nil { t.Errorf("unexpected error when deleting collection %q: %v", tc.name, err) continue @@ -470,7 +469,7 @@ func TestWatch(t *testing.T) { } defer srv.Close() - watcher, err := cl.Resource(resource, tc.namespace).Watch(&v1.ListOptions{}) + watcher, err := cl.Resource(resource, tc.namespace).Watch(&metav1.ListOptions{}) if err != nil { t.Errorf("unexpected error when watching %q: %v", tc.name, err) continue diff --git a/pkg/controller/certificates/certificate_controller.go b/pkg/controller/certificates/certificate_controller.go index cbce5dccd61..55b2fe38c67 100644 --- a/pkg/controller/certificates/certificate_controller.go +++ b/pkg/controller/certificates/certificate_controller.go @@ -20,11 +20,11 @@ import ( "fmt" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api/v1" certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" "k8s.io/kubernetes/pkg/client/cache" 
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -81,10 +81,10 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du // Manage the addition/update of certificate requests cc.csrStore.Store, cc.csrController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return cc.kubeClient.Certificates().CertificateSigningRequests().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options) }, }, diff --git a/pkg/controller/cloud/nodecontroller.go b/pkg/controller/cloud/nodecontroller.go index c7abe357f62..f79880178fe 100644 --- a/pkg/controller/cloud/nodecontroller.go +++ b/pkg/controller/cloud/nodecontroller.go @@ -89,7 +89,7 @@ func (cnc *CloudNodeController) Run() { defer utilruntime.HandleCrash() go wait.Until(func() { - nodes, err := cnc.kubeClient.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"}) + nodes, err := cnc.kubeClient.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { glog.Errorf("Error monitoring node status: %v", err) } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index d3b7e075f36..1e3fd92fb4b 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -59,7 +59,7 @@ func newReplicationController(replicas int) *v1.ReplicationController { ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, Spec: v1.ReplicationControllerSpec{ @@ -238,7 +238,7 @@ func TestUIDExpectations(t *testing.T) { } func TestCreatePods(t *testing.T) { - ns := v1.NamespaceDefault + ns := 
metav1.NamespaceDefault body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}}) fakeHandler := utiltesting.FakeHandler{ StatusCode: 200, @@ -267,7 +267,7 @@ func TestCreatePods(t *testing.T) { }, Spec: controllerSpec.Spec.Template.Spec, } - fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", v1.NamespaceDefault, ""), "POST", nil) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", metav1.NamespaceDefault, ""), "POST", nil) var actualPod = &v1.Pod{} err := json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod) if err != nil { diff --git a/pkg/controller/cronjob/cronjob_controller_test.go b/pkg/controller/cronjob/cronjob_controller_test.go index 25f60efd057..c51491b86b3 100644 --- a/pkg/controller/cronjob/cronjob_controller_test.go +++ b/pkg/controller/cronjob/cronjob_controller_test.go @@ -130,7 +130,7 @@ func newJob(UID string) batch.Job { ObjectMeta: metav1.ObjectMeta{ UID: types.UID(UID), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob", }, Spec: jobSpec(), diff --git a/pkg/controller/cronjob/utils_test.go b/pkg/controller/cronjob/utils_test.go index 36b0bc02b11..4b84b198906 100644 --- a/pkg/controller/cronjob/utils_test.go +++ b/pkg/controller/cronjob/utils_test.go @@ -101,7 +101,7 @@ func TestGetParentUIDFromJob(t *testing.T) { j := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: batch.JobSpec{ Selector: &metav1.LabelSelector{ @@ -269,7 +269,7 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) { sj := batch.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, Spec: batch.CronJobSpec{ diff --git a/pkg/controller/daemon/daemoncontroller_test.go 
b/pkg/controller/daemon/daemoncontroller_test.go index 1d3cd0ddc5f..68e283cafdd 100644 --- a/pkg/controller/daemon/daemoncontroller_test.go +++ b/pkg/controller/daemon/daemoncontroller_test.go @@ -58,7 +58,7 @@ func newDaemonSet(name string) *extensions.DaemonSet { TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()}, ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, @@ -88,7 +88,7 @@ func newNode(name string, label map[string]string) *v1.Node { ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: label, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ @@ -113,7 +113,7 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod { ObjectMeta: metav1.ObjectMeta{ GenerateName: podName, Labels: label, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: v1.PodSpec{ NodeName: nodeName, @@ -377,7 +377,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { manager.podStore.Indexer.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: simpleDaemonSetLabel, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: podSpec, }) @@ -427,7 +427,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { manager.podStore.Indexer.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"bang": "boom"}, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: v1.PodSpec{ NodeName: "node1", diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index d254d4666a6..1dfcb3e53be 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ 
b/pkg/controller/deployment/deployment_controller_test.go @@ -47,7 +47,7 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1. ObjectMeta: metav1.ObjectMeta{ Name: name, CreationTimestamp: timestamp, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ReplicaSetSpec{ Replicas: func() *int32 { i := int32(replicas); return &i }(), @@ -71,7 +71,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: name, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: make(map[string]string), }, Spec: extensions.DeploymentSpec{ @@ -112,7 +112,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi return &extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: d.Spec.Selector.MatchLabels, }, Spec: extensions.ReplicaSetSpec{ diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 929a868e623..d2aa4d2b18c 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -127,7 +127,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced. 
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) { rsList, err := deploymentutil.ListReplicaSets(deployment, - func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) { + func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err @@ -191,7 +191,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) if err != nil { return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) } - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err @@ -240,7 +240,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) { return deploymentutil.ListPods(deployment, - func(namespace string, options v1.ListOptions) (*v1.PodList, error) { + func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index 82f6e21f458..4bc24b65785 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -537,7 +537,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) // listReplicaSets lists all RSes the given deployment targets with the given client interface. 
func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) { return ListReplicaSets(deployment, - func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) { + func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { rsList, err := c.Extensions().ReplicaSets(namespace).List(options) if err != nil { return nil, err @@ -553,14 +553,14 @@ func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ( // listReplicaSets lists all Pods the given deployment targets with the given client interface. func listPods(deployment *extensions.Deployment, c clientset.Interface) (*v1.PodList, error) { return ListPods(deployment, - func(namespace string, options v1.ListOptions) (*v1.PodList, error) { + func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { return c.Core().Pods(namespace).List(options) }) } // TODO: switch this to full namespacers -type rsListFunc func(string, v1.ListOptions) ([]*extensions.ReplicaSet, error) -type podListFunc func(string, v1.ListOptions) (*v1.PodList, error) +type rsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) +type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) // ListReplicaSets returns a slice of RSes the given deployment targets. 
func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) { @@ -572,7 +572,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([ if err != nil { return nil, err } - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} return getRSList(namespace, options) } @@ -583,7 +583,7 @@ func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*v1.Po if err != nil { return nil, err } - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} return getPodList(namespace, options) } diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index 0471d324ba2..c9c23983d87 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -129,11 +129,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c dc.pdbStore, dc.pdbController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return dc.kubeClient.Policy().PodDisruptionBudgets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return dc.kubeClient.Policy().PodDisruptionBudgets(metav1.NamespaceAll).Watch(options) }, }, &policy.PodDisruptionBudget{}, @@ -148,11 +148,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c dc.rcIndexer, dc.rcController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options 
v1.ListOptions) (runtime.Object, error) { - return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return dc.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return dc.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options) }, }, &v1.ReplicationController{}, @@ -165,11 +165,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return dc.kubeClient.Extensions().ReplicaSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return dc.kubeClient.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options) }, }, &extensions.ReplicaSet{}, @@ -181,11 +181,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c dc.dIndexer, dc.dController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return dc.kubeClient.Extensions().Deployments(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) 
(watch.Interface, error) { - return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return dc.kubeClient.Extensions().Deployments(metav1.NamespaceAll).Watch(options) }, }, &extensions.Deployment{}, @@ -197,11 +197,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c dc.ssStore, dc.ssController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return dc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return dc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return dc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return dc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).Watch(options) }, }, &apps.StatefulSet{}, diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index d4f1272e2f8..f88df2e03e8 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -122,7 +122,7 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, Spec: policy.PodDisruptionBudgetSpec{ @@ -146,7 +146,7 @@ func newPod(t *testing.T, name string) (*v1.Pod, string) { UID: uuid.NewUUID(), Annotations: make(map[string]string), Name: name, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, @@ -172,7 +172,7 @@ func newReplicationController(t *testing.T, size int32) (*v1.ReplicationControll 
ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, @@ -196,7 +196,7 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) { ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, @@ -220,7 +220,7 @@ func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) { ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, @@ -244,7 +244,7 @@ func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) { ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index b4c6ec4df07..4cf49d66d01 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -84,11 +84,11 @@ func NewEndpointController(podInformer cache.SharedIndexInformer, client clients e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return e.client.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return e.client.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return e.client.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return 
e.client.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, @@ -503,7 +503,7 @@ func (e *EndpointController) syncService(key string) error { // some stragglers could have been left behind if the endpoint controller // reboots). func (e *EndpointController) checkLeftoverEndpoints() { - list, err := e.client.Core().Endpoints(v1.NamespaceAll).List(v1.ListOptions{}) + list, err := e.client.Core().Endpoints(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err)) return diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go index 7a7a14d5c29..6f0d1d85e59 100644 --- a/pkg/controller/endpoint/endpoints_controller_test.go +++ b/pkg/controller/endpoint/endpoints_controller_test.go @@ -93,7 +93,7 @@ func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResp } func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { - ns := v1.NamespaceDefault + ns := metav1.NamespaceDefault testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ @@ -119,10 +119,10 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { } func TestCheckLeftoverEndpoints(t *testing.T) { - ns := v1.NamespaceDefault - // Note that this requests *all* endpoints, therefore the NamespaceAll + ns := metav1.NamespaceDefault + // Note that this requests *all* endpoints, therefore metav1.NamespaceAll // below. 
- testServer, _ := makeTestServer(t, v1.NamespaceAll, + testServer, _ := makeTestServer(t, metav1.NamespaceAll, serverResponse{http.StatusOK, &v1.EndpointsList{ ListMeta: metav1.ListMeta{ ResourceVersion: "1", @@ -396,8 +396,8 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { } func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { - ns := v1.NamespaceDefault - testServer, endpointsHandler := makeTestServer(t, v1.NamespaceDefault, + ns := metav1.NamespaceDefault + testServer, endpointsHandler := makeTestServer(t, metav1.NamespaceDefault, serverResponse{http.StatusOK, &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "1", @@ -413,16 +413,16 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints.podStoreSynced = alwaysReady - addPods(endpoints.podStore.Indexer, v1.NamespaceDefault, 1, 1, 0) + addPods(endpoints.podStore.Indexer, metav1.NamespaceDefault, 1, 1, 0) endpoints.serviceStore.Indexer.Add(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: v1.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{ Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, }, }) endpoints.syncService(ns + "/foo") - endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", v1.NamespaceDefault, "foo"), "GET", nil) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", metav1.NamespaceDefault, "foo"), "GET", nil) } func TestSyncEndpointsItems(t *testing.T) { diff --git a/pkg/controller/garbagecollector/garbagecollector.go 
b/pkg/controller/garbagecollector/garbagecollector.go index 8a1b6796733..3ed8be4e05b 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -450,24 +450,24 @@ type GarbageCollector struct { func gcListWatcher(client *dynamic.Client, resource schema.GroupVersionResource) *cache.ListWatch { return &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { // APIResource.Kind is not used by the dynamic client, so // leave it empty. We want to list this resource in all // namespaces if it's namespace scoped, so leave // APIResource.Namespaced as false is all right. apiResource := metav1.APIResource{Name: resource.Resource} return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback). - Resource(&apiResource, v1.NamespaceAll). + Resource(&apiResource, metav1.NamespaceAll). List(&options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // APIResource.Kind is not used by the dynamic client, so // leave it empty. We want to list this resource in all // namespaces if it's namespace scoped, so leave // APIResource.Namespaced as false is all right. apiResource := metav1.APIResource{Name: resource.Resource} return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback). - Resource(&apiResource, v1.NamespaceAll). + Resource(&apiResource, metav1.NamespaceAll). 
Watch(&options) }, } diff --git a/pkg/controller/garbagecollector/garbagecollector_test.go b/pkg/controller/garbagecollector/garbagecollector_test.go index e6988a180dc..a355bce24db 100644 --- a/pkg/controller/garbagecollector/garbagecollector_test.go +++ b/pkg/controller/garbagecollector/garbagecollector_test.go @@ -349,8 +349,8 @@ func TestGCListWatcher(t *testing.T) { t.Fatal(err) } lw := gcListWatcher(client, podResource) - lw.Watch(v1.ListOptions{ResourceVersion: "1"}) - lw.List(v1.ListOptions{ResourceVersion: "1"}) + lw.Watch(metav1.ListOptions{ResourceVersion: "1"}) + lw.List(metav1.ListOptions{ResourceVersion: "1"}) if e, a := 2, len(testHandler.actions); e != a { t.Errorf("expect %d requests, got %d", e, a) } diff --git a/pkg/controller/informers/BUILD b/pkg/controller/informers/BUILD index c6ecebc20da..46763d3e5c2 100644 --- a/pkg/controller/informers/BUILD +++ b/pkg/controller/informers/BUILD @@ -35,6 +35,7 @@ go_library( "//pkg/client/listers/batch/v1:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/pkg/controller/informers/batch.go b/pkg/controller/informers/batch.go index 09c4dc17977..354824714de 100644 --- a/pkg/controller/informers/batch.go +++ b/pkg/controller/informers/batch.go @@ -20,9 +20,9 @@ import ( "reflect" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api/v1" batch "k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -61,11 +61,11 @@ func (f *jobInformer) Informer() cache.SharedIndexInformer { func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) 
cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Batch().Jobs(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Batch().Jobs(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Batch().Jobs(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Batch().Jobs(metav1.NamespaceAll).Watch(options) }, }, &batch.Job{}, diff --git a/pkg/controller/informers/core.go b/pkg/controller/informers/core.go index 67a6f10578a..8f3717cf912 100644 --- a/pkg/controller/informers/core.go +++ b/pkg/controller/informers/core.go @@ -20,6 +20,7 @@ import ( "reflect" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" @@ -359,11 +360,11 @@ func (f *replicationControllerInformer) Lister() *listers.StoreToReplicationCont func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Core().Pods(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().Pods(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Core().Pods(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().Pods(metav1.NamespaceAll).Watch(options) }, }, &v1.Pod{}, @@ -378,10 +379,10 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cach func NewNodeInformer(client 
clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return client.Core().Nodes().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return client.Core().Nodes().Watch(options) }, }, @@ -396,11 +397,11 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cac func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().PersistentVolumeClaims(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().PersistentVolumeClaims(metav1.NamespaceAll).Watch(options) }, }, &v1.PersistentVolumeClaim{}, @@ -415,10 +416,10 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cach func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return client.Core().PersistentVolumes().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: 
func(options metav1.ListOptions) (watch.Interface, error) { return client.Core().PersistentVolumes().Watch(options) }, }, @@ -433,10 +434,10 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return client.Core().Namespaces().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return client.Core().Namespaces().Watch(options) }, }, @@ -451,15 +452,11 @@ func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Core().Namespaces().List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().Namespaces().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Core().Namespaces().Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().Namespaces().Watch(options) }, }, &api.Namespace{}, @@ -473,11 +470,11 @@ func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeri func NewLimitRangeInformer(client 
clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Core().LimitRanges(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().LimitRanges(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Core().LimitRanges(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().LimitRanges(metav1.NamespaceAll).Watch(options) }, }, &v1.LimitRange{}, @@ -491,15 +488,11 @@ func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duratio func NewInternalLimitRangeInformer(internalclient internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return internalclient.Core().LimitRanges(v1.NamespaceAll).List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return internalclient.Core().LimitRanges(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return internalclient.Core().LimitRanges(v1.NamespaceAll).Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return internalclient.Core().LimitRanges(metav1.NamespaceAll).Watch(options) }, }, &api.LimitRange{}, @@ -513,11 +506,11 @@ func NewInternalLimitRangeInformer(internalclient 
internalclientset.Interface, r func NewReplicationControllerInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Core().ReplicationControllers(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().ReplicationControllers(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Core().ReplicationControllers(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options) }, }, &v1.ReplicationController{}, @@ -568,11 +561,11 @@ func (f *serviceAccountInformer) Lister() *listers.StoreToServiceAccountLister { func NewServiceAccountInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Core().ServiceAccounts(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().ServiceAccounts(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().ServiceAccounts(metav1.NamespaceAll).Watch(options) }, }, &v1.ServiceAccount{}, diff --git a/pkg/controller/informers/extensions.go b/pkg/controller/informers/extensions.go index c60ab79f05d..60556ee4bd2 100644 --- a/pkg/controller/informers/extensions.go +++ b/pkg/controller/informers/extensions.go @@ -19,9 +19,9 @@ package 
informers import ( "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/legacylisters" @@ -49,11 +49,11 @@ func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return f.client.Extensions().DaemonSets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return f.client.Extensions().DaemonSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return f.client.Extensions().DaemonSets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return f.client.Extensions().DaemonSets(metav1.NamespaceAll).Watch(options) }, }, &extensions.DaemonSet{}, @@ -91,11 +91,11 @@ func (f *deploymentInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return f.client.Extensions().Deployments(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return f.client.Extensions().Deployments(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return f.client.Extensions().Deployments(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return f.client.Extensions().Deployments(metav1.NamespaceAll).Watch(options) }, }, &extensions.Deployment{}, @@ -133,11 +133,11 @@ func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( 
&cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return f.client.Extensions().ReplicaSets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return f.client.Extensions().ReplicaSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return f.client.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return f.client.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options) }, }, &extensions.ReplicaSet{}, diff --git a/pkg/controller/informers/rbac.go b/pkg/controller/informers/rbac.go index 8cc8623489b..7031e621a00 100644 --- a/pkg/controller/informers/rbac.go +++ b/pkg/controller/informers/rbac.go @@ -19,10 +19,10 @@ package informers import ( "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/legacylisters" @@ -48,10 +48,10 @@ func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return f.internalclient.Rbac().ClusterRoles().List(convertListOptionsOrDie(options)) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return f.internalclient.Rbac().ClusterRoles().Watch(convertListOptionsOrDie(options)) }, }, @@ -88,10 +88,10 @@ func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) 
(runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return f.internalclient.Rbac().ClusterRoleBindings().List(convertListOptionsOrDie(options)) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return f.internalclient.Rbac().ClusterRoleBindings().Watch(convertListOptionsOrDie(options)) }, }, @@ -128,11 +128,11 @@ func (f *roleInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return f.internalclient.Rbac().Roles(v1.NamespaceAll).List(convertListOptionsOrDie(options)) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return f.internalclient.Rbac().Roles(metav1.NamespaceAll).List(convertListOptionsOrDie(options)) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return f.internalclient.Rbac().Roles(v1.NamespaceAll).Watch(convertListOptionsOrDie(options)) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return f.internalclient.Rbac().Roles(metav1.NamespaceAll).Watch(convertListOptionsOrDie(options)) }, }, &rbac.Role{}, @@ -168,11 +168,11 @@ func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).List(convertListOptionsOrDie(options)) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return f.internalclient.Rbac().RoleBindings(metav1.NamespaceAll).List(convertListOptionsOrDie(options)) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).Watch(convertListOptionsOrDie(options)) + WatchFunc: func(options metav1.ListOptions) 
(watch.Interface, error) { + return f.internalclient.Rbac().RoleBindings(metav1.NamespaceAll).Watch(convertListOptionsOrDie(options)) }, }, &rbac.RoleBinding{}, @@ -188,8 +188,8 @@ func (f *roleBindingInformer) Lister() listers.RoleBindingLister { return listers.NewRoleBindingLister(f.Informer().GetIndexer()) } -func convertListOptionsOrDie(in v1.ListOptions) api.ListOptions { - out := api.ListOptions{} +func convertListOptionsOrDie(in metav1.ListOptions) metav1.ListOptions { + out := metav1.ListOptions{} if err := api.Scheme.Convert(&in, &out, nil); err != nil { panic(err) } diff --git a/pkg/controller/informers/storage.go b/pkg/controller/informers/storage.go index 37138208da3..10cf185acc1 100644 --- a/pkg/controller/informers/storage.go +++ b/pkg/controller/informers/storage.go @@ -19,9 +19,9 @@ package informers import ( "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api/v1" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/legacylisters" @@ -49,10 +49,10 @@ func (f *storageClassInformer) Informer() cache.SharedIndexInformer { } informer = cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return f.client.Storage().StorageClasses().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return f.client.Storage().StorageClasses().Watch(options) }, }, diff --git a/pkg/controller/job/jobcontroller_test.go b/pkg/controller/job/jobcontroller_test.go index 6f9e60b2a04..c206bead8d8 100644 --- a/pkg/controller/job/jobcontroller_test.go +++ b/pkg/controller/job/jobcontroller_test.go @@ -43,7 +43,7 @@ func newJob(parallelism, completions int32) *batch.Job { j := 
&batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: batch.JobSpec{ Selector: &metav1.LabelSelector{ @@ -526,7 +526,7 @@ func TestJobPodLookup(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "basic"}, }, pod: &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}, + ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}, }, expectedName: "", }, diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index e5878122a80..e4687724f82 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -129,10 +129,10 @@ func NewNamespaceController( // configure the backing store/controller store, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return kubeClient.Core().Namespaces().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return kubeClient.Core().Namespaces().Watch(options) }, }, diff --git a/pkg/controller/namespace/namespace_controller_utils.go b/pkg/controller/namespace/namespace_controller_utils.go index 6bccde406cf..8cba6e4f271 100644 --- a/pkg/controller/namespace/namespace_controller_utils.go +++ b/pkg/controller/namespace/namespace_controller_utils.go @@ -178,7 +178,7 @@ func deleteCollection( // resource deletions generically. it will ensure all resources in the namespace are purged prior to releasing // namespace itself. 
orphanDependents := false - err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &v1.ListOptions{}) + err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &metav1.ListOptions{}) if err == nil { return true, nil @@ -220,7 +220,7 @@ func listCollection( } apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true} - obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) + obj, err := dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{}) if err == nil { unstructuredList, ok := obj.(*unstructured.UnstructuredList) if !ok { @@ -486,7 +486,7 @@ func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionRes func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) { glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) estimate := int64(0) - items, err := kubeClient.Core().Pods(ns).List(v1.ListOptions{}) + items, err := kubeClient.Core().Pods(ns).List(metav1.ListOptions{}) if err != nil { return estimate, err } diff --git a/pkg/controller/node/controller_utils.go b/pkg/controller/node/controller_utils.go index 2c888d24806..36f28071a7c 100644 --- a/pkg/controller/node/controller_utils.go +++ b/pkg/controller/node/controller_utils.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -49,8 +50,8 @@ const ( func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore listers.StoreToDaemonSetLister) (bool, error) { remaining := false selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String() - options := 
v1.ListOptions{FieldSelector: selector} - pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options) + options := metav1.ListOptions{FieldSelector: selector} + pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options) var updateErrList []error if err != nil { @@ -203,8 +204,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { } nodeName := node.Name glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) - opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()} - pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts) + opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()} + pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 428c3924c1f..3992558e6b9 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -253,7 +253,7 @@ func NewNodeController( // We must poll because apiserver might not be up. This error causes // controller manager to restart. 
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) { - nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{ + nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Everything().String(), LabelSelector: labels.Everything().String(), }) diff --git a/pkg/controller/node/nodecontroller_test.go b/pkg/controller/node/nodecontroller_test.go index a3946967980..09e64dbae0e 100644 --- a/pkg/controller/node/nodecontroller_test.go +++ b/pkg/controller/node/nodecontroller_test.go @@ -80,7 +80,7 @@ func NewNodeControllerFromClient( } func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error { - nodes, err := fakeNodeHandler.List(v1.ListOptions{}) + nodes, err := fakeNodeHandler.List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/controller/node/testutil/test_utils.go b/pkg/controller/node/testutil/test_utils.go index c78cc8442c3..d90d0a46785 100644 --- a/pkg/controller/node/testutil/test_utils.go +++ b/pkg/controller/node/testutil/test_utils.go @@ -130,7 +130,7 @@ func (m *FakeNodeHandler) Get(name string, opts metav1.GetOptions) (*v1.Node, er } // List returns a list of Nodes from the fake store. -func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) { +func (m *FakeNodeHandler) List(opts metav1.ListOptions) (*v1.NodeList, error) { m.lock.Lock() defer func() { m.RequestCount++ @@ -174,7 +174,7 @@ func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of Nodes from the fake store. -func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error { +func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts metav1.ListOptions) error { return nil } @@ -215,7 +215,7 @@ func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, e } // Watch watches Nodes in a fake store. 
-func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error) { return watch.NewFake(), nil } @@ -263,7 +263,7 @@ func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, mes t := metav1.Time{Time: f.clock.Now()} namespace := ref.Namespace if namespace == "" { - namespace = v1.NamespaceDefault + namespace = metav1.NamespaceDefault } return &v1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -339,7 +339,7 @@ func contains(node *v1.Node, nodes []*v1.Node) bool { // GetZones returns list of zones for all Nodes stored in FakeNodeHandler func GetZones(nodeHandler *FakeNodeHandler) []string { - nodes, _ := nodeHandler.List(v1.ListOptions{}) + nodes, _ := nodeHandler.List(metav1.ListOptions{}) zones := sets.NewString() for _, node := range nodes.Items { zones.Insert(utilnode.GetZoneKey(&node)) diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 3a1a93fcdb3..4fda6ed0ac7 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -75,11 +75,11 @@ var upscaleForbiddenWindow = 3 * time.Minute func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, cache.Controller) { return cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return controller.hpaNamespacer.HorizontalPodAutoscalers(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return 
controller.hpaNamespacer.HorizontalPodAutoscalers(metav1.NamespaceAll).Watch(options) }, }, &autoscaling.HorizontalPodAutoscaler{}, diff --git a/pkg/controller/podautoscaler/metrics/BUILD b/pkg/controller/podautoscaler/metrics/BUILD index 87f8d866d30..7f741b3ea36 100644 --- a/pkg/controller/podautoscaler/metrics/BUILD +++ b/pkg/controller/podautoscaler/metrics/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/heapster/metrics/api/v1/types", "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1", diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go index f63f18ba935..d9c9f5acca4 100644 --- a/pkg/controller/podautoscaler/replica_calculator.go +++ b/pkg/controller/podautoscaler/replica_calculator.go @@ -21,6 +21,7 @@ import ( "math" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/api/v1" @@ -48,7 +49,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err) } - podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()}) + podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err) } @@ -156,7 +157,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err) } - podList, err := 
c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()}) + podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err) } diff --git a/pkg/controller/podgc/BUILD b/pkg/controller/podgc/BUILD index 96d49d805b0..d0da481a495 100644 --- a/pkg/controller/podgc/BUILD +++ b/pkg/controller/podgc/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/controller/informers:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/sets", diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index 7af01a6df91..73e8ae156b9 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -21,6 +21,7 @@ import ( "sync" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -156,7 +157,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) { func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) { glog.V(4).Infof("GC'ing orphaned") // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible. 
- nodes, err := gcc.kubeClient.Core().Nodes().List(v1.ListOptions{}) + nodes, err := gcc.kubeClient.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return } diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index 8772097afff..ad8a4463db4 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -103,7 +103,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, Spec: extensions.ReplicaSetSpec{ @@ -465,7 +465,7 @@ func TestPodControllerLookup(t *testing.T) { { inRSs: []*extensions.ReplicaSet{ {ObjectMeta: metav1.ObjectMeta{Name: "basic"}}}, - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}}, outRSName: "", }, // Matching labels, not namespace diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index 129227f66cb..7bf20a74673 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -65,7 +65,7 @@ func newReplicationController(replicas int) *v1.ReplicationController { ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: "foobar", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, Spec: v1.ReplicationControllerSpec{ @@ -398,7 +398,7 @@ func TestPodControllerLookup(t *testing.T) { { inRCs: []*v1.ReplicationController{ {ObjectMeta: metav1.ObjectMeta{Name: "basic"}}}, - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: 
metav1.NamespaceAll}}, outRCName: "", }, // Matching labels, not namespace diff --git a/pkg/controller/resourcequota/replenishment_controller.go b/pkg/controller/resourcequota/replenishment_controller.go index fc34a73b261..fe818cce435 100644 --- a/pkg/controller/resourcequota/replenishment_controller.go +++ b/pkg/controller/resourcequota/replenishment_controller.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -148,11 +149,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon // TODO move to informer when defined _, result = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return r.kubeClient.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return r.kubeClient.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, @@ -166,11 +167,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon // TODO move to informer when defined _, result = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return r.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return 
r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return r.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options) }, }, &v1.ReplicationController{}, @@ -189,11 +190,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon // TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths... _, result = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return r.kubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return r.kubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).Watch(options) }, }, &v1.PersistentVolumeClaim{}, @@ -206,11 +207,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon // TODO move to informer when defined _, result = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return r.kubeClient.Core().Secrets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return r.kubeClient.Core().Secrets(metav1.NamespaceAll).Watch(options) }, }, &v1.Secret{}, @@ -223,11 +224,11 @@ func (r 
*replenishmentControllerFactory) NewController(options *ReplenishmentCon // TODO move to informer when defined _, result = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return r.kubeClient.Core().ConfigMaps(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return r.kubeClient.Core().ConfigMaps(metav1.NamespaceAll).Watch(options) }, }, &v1.ConfigMap{}, diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 21f92811295..8c2ab9d6870 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -95,11 +95,11 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour // build the controller that observes quota rq.rqIndexer, rq.rqController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return rq.kubeClient.Core().ResourceQuotas(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return rq.kubeClient.Core().ResourceQuotas(metav1.NamespaceAll).Watch(options) }, }, &v1.ResourceQuota{}, diff --git a/pkg/controller/route/routecontroller.go b/pkg/controller/route/routecontroller.go 
index aa943ae3826..bf8e38d8e19 100644 --- a/pkg/controller/route/routecontroller.go +++ b/pkg/controller/route/routecontroller.go @@ -72,10 +72,10 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterNam rc.nodeStore.Store, rc.nodeController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return rc.kubeClient.Core().Nodes().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return rc.kubeClient.Core().Nodes().Watch(options) }, }, diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index 12bbb9e104b..05406c3f7b8 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/util/workqueue:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/runtime", diff --git a/pkg/controller/service/servicecontroller.go b/pkg/controller/service/servicecontroller.go index 756a8bb1a2c..2f3ccc56426 100644 --- a/pkg/controller/service/servicecontroller.go +++ b/pkg/controller/service/servicecontroller.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/runtime" @@ -122,11 +123,11 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN } s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) { - return 
s.kubeClient.Core().Services(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + return s.kubeClient.Core().Services(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return s.kubeClient.Core().Services(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return s.kubeClient.Core().Services(metav1.NamespaceAll).Watch(options) }, }, &v1.Service{}, @@ -176,7 +177,7 @@ func (s *ServiceController) Run(workers int) { for i := 0; i < workers; i++ { go wait.Until(s.worker, time.Second, wait.NeverStop) } - nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything()) + nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fields.Everything()) cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run() go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop) } diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go index 22e34fa5051..557ca7a0c1c 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go @@ -37,7 +37,7 @@ type serverResponse struct { } func TestServiceAccountCreation(t *testing.T) { - ns := v1.NamespaceDefault + ns := metav1.NamespaceDefault defaultName := "default" managedName := "managed" diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 9a2752ea60c..8edd3d3459b 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -93,11 +93,11 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions e.serviceAccounts, e.serviceAccountController = 
cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return e.client.Core().ServiceAccounts(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return e.client.Core().ServiceAccounts(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return e.client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return e.client.Core().ServiceAccounts(metav1.NamespaceAll).Watch(options) }, }, &v1.ServiceAccount{}, @@ -112,13 +112,13 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}) e.secrets, e.secretController = cache.NewIndexerInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = tokenSelector.String() - return e.client.Core().Secrets(v1.NamespaceAll).List(options) + return e.client.Core().Secrets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = tokenSelector.String() - return e.client.Core().Secrets(v1.NamespaceAll).Watch(options) + return e.client.Core().Secrets(metav1.NamespaceAll).Watch(options) }, }, &v1.Secret{}, diff --git a/pkg/controller/serviceaccount/tokens_controller_test.go b/pkg/controller/serviceaccount/tokens_controller_test.go index 67c1e714a7c..e6c00974f7a 100644 --- a/pkg/controller/serviceaccount/tokens_controller_test.go +++ b/pkg/controller/serviceaccount/tokens_controller_test.go @@ -230,9 +230,9 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: 
serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), }, }, "new serviceaccount with no secrets encountering create error": { @@ -256,17 +256,17 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ // Attempt 1 - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), // Attempt 2 - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, 
namedCreatedTokenSecret("default-token-x50vb")), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, namedCreatedTokenSecret("default-token-x50vb")), // Attempt 3 - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-scq98")), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-scq98"))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, namedCreatedTokenSecret("default-token-scq98")), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-scq98"))), }, }, "new serviceaccount with no secrets encountering unending create error": { @@ -286,14 +286,14 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ // Attempt - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + 
core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), // Retry 1 - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-x50vb")), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, namedCreatedTokenSecret("default-token-x50vb")), // Retry 2 - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-scq98")), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, namedCreatedTokenSecret("default-token-scq98")), }, }, "new serviceaccount with missing secrets": { @@ -301,9 +301,9 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: serviceAccount(missingSecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), + 
core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), }, }, "new serviceaccount with non-token secrets": { @@ -311,9 +311,9 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: serviceAccount(regularSecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), }, }, "new serviceaccount with token secrets": { @@ -328,7 +328,7 @@ func TestTokenCreation(t *testing.T) { AddedServiceAccount: serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, 
metav1.NamespaceDefault, "default"), }, }, "updated serviceaccount with no secrets": { @@ -336,9 +336,9 @@ func TestTokenCreation(t *testing.T) { UpdatedServiceAccount: serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), }, }, "updated serviceaccount with missing secrets": { @@ -346,9 +346,9 @@ func TestTokenCreation(t *testing.T) { UpdatedServiceAccount: serviceAccount(missingSecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: 
"secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), }, }, "updated serviceaccount with non-token secrets": { @@ -356,9 +356,9 @@ func TestTokenCreation(t *testing.T) { UpdatedServiceAccount: serviceAccount(regularSecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewCreateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), }, }, "updated serviceaccount with token secrets": { @@ -372,7 +372,7 @@ func TestTokenCreation(t *testing.T) { UpdatedServiceAccount: serviceAccount(emptySecretReferences()), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), }, }, @@ -396,7 +396,7 @@ func TestTokenCreation(t *testing.T) { DeletedServiceAccount: serviceAccount(tokenSecretReferences()), ExpectedActions: []core.Action{ - 
core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), + core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), }, }, @@ -405,8 +405,8 @@ func TestTokenCreation(t *testing.T) { AddedSecret: serviceAccountTokenSecret(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), }, }, "added secret with serviceaccount": { @@ -421,8 +421,8 @@ func TestTokenCreation(t *testing.T) { AddedSecret: serviceAccountTokenSecretWithoutTokenData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "added token secret without ca data": { @@ -431,8 +431,8 @@ func TestTokenCreation(t *testing.T) { AddedSecret: serviceAccountTokenSecretWithoutCAData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - 
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "added token secret with mismatched ca data": { @@ -441,8 +441,8 @@ func TestTokenCreation(t *testing.T) { AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "added token secret without namespace data": { @@ -451,8 +451,8 @@ func TestTokenCreation(t *testing.T) { AddedSecret: serviceAccountTokenSecretWithoutNamespaceData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "added token secret with custom namespace data": { @@ -470,8 +470,8 @@ func 
TestTokenCreation(t *testing.T) { UpdatedSecret: serviceAccountTokenSecret(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewDeleteAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), }, }, "updated secret with serviceaccount": { @@ -486,8 +486,8 @@ func TestTokenCreation(t *testing.T) { UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "updated token secret without ca data": { @@ -496,8 +496,8 @@ func TestTokenCreation(t *testing.T) { UpdatedSecret: serviceAccountTokenSecretWithoutCAData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + 
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "updated token secret with mismatched ca data": { @@ -506,8 +506,8 @@ func TestTokenCreation(t *testing.T) { UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "updated token secret without namespace data": { @@ -516,8 +516,8 @@ func TestTokenCreation(t *testing.T) { UpdatedSecret: serviceAccountTokenSecretWithoutNamespaceData(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"), - core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, "token-secret-1"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, metav1.NamespaceDefault, serviceAccountTokenSecret()), }, }, "updated token secret with custom namespace data": { @@ -540,8 +540,8 @@ func TestTokenCreation(t *testing.T) { DeletedSecret: serviceAccountTokenSecret(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), - 
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(emptySecretReferences())), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), + core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, serviceAccount(emptySecretReferences())), }, }, "deleted secret with serviceaccount without reference": { @@ -549,7 +549,7 @@ func TestTokenCreation(t *testing.T) { DeletedSecret: serviceAccountTokenSecret(), ExpectedActions: []core.Action{ - core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"), + core.NewGetAction(schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, metav1.NamespaceDefault, "default"), }, }, } diff --git a/pkg/controller/statefulset/fakes.go b/pkg/controller/statefulset/fakes.go index a043046a4f2..6f138654a84 100644 --- a/pkg/controller/statefulset/fakes.go +++ b/pkg/controller/statefulset/fakes.go @@ -77,7 +77,7 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeM }, ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: types.UID("test"), }, Spec: apps.StatefulSetSpec{ diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index 75af8815565..8081a74f51b 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -113,11 +113,11 @@ func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient psc.psStore.Store, psc.psController = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) 
(runtime.Object, error) { + return psc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return psc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).Watch(options) }, }, &apps.StatefulSet{}, diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 81066e3f899..cb8f7fcd14f 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -96,10 +96,10 @@ func NewController(p ControllerParameters) *PersistentVolumeController { volumeSource := p.VolumeSource if volumeSource == nil { volumeSource = &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return p.KubeClient.Core().PersistentVolumes().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return p.KubeClient.Core().PersistentVolumes().Watch(options) }, } @@ -109,11 +109,11 @@ func NewController(p ControllerParameters) *PersistentVolumeController { claimSource := p.ClaimSource if claimSource == nil { claimSource = &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return p.KubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return p.KubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return p.KubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options) + WatchFunc: 
func(options metav1.ListOptions) (watch.Interface, error) { + return p.KubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).Watch(options) }, } } @@ -122,10 +122,10 @@ func NewController(p ControllerParameters) *PersistentVolumeController { classSource := p.ClassSource if classSource == nil { classSource = &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return p.KubeClient.Storage().StorageClasses().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return p.KubeClient.Storage().StorageClasses().Watch(options) }, } @@ -170,7 +170,7 @@ func NewController(p ControllerParameters) *PersistentVolumeController { // order to have the caches already filled when first addClaim/addVolume to // perform initial synchronization of the controller. func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) { - volumeListObj, err := volumeSource.List(v1.ListOptions{}) + volumeListObj, err := volumeSource.List(metav1.ListOptions{}) if err != nil { glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return @@ -194,7 +194,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSour } } - claimListObj, err := claimSource.List(v1.ListOptions{}) + claimListObj, err := claimSource.List(metav1.ListOptions{}) if err != nil { glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return diff --git a/pkg/genericapiserver/endpoints/BUILD b/pkg/genericapiserver/endpoints/BUILD index ea970885731..be06efbfe97 100644 --- a/pkg/genericapiserver/endpoints/BUILD +++ b/pkg/genericapiserver/endpoints/BUILD @@ -33,6 +33,7 @@ go_test( "//vendor:golang.org/x/net/websocket", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + 
"//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/genericapiserver/endpoints/apiserver_test.go b/pkg/genericapiserver/endpoints/apiserver_test.go index 417cdb053ca..f20b2959401 100644 --- a/pkg/genericapiserver/endpoints/apiserver_test.go +++ b/pkg/genericapiserver/endpoints/apiserver_test.go @@ -2169,7 +2169,7 @@ func TestPatch(t *testing.T) { t: t, expectedSet: "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/namespaces/default/simple/" + ID, name: ID, - namespace: api.NamespaceDefault, + namespace: metav1.NamespaceDefault, } handler := handleLinker(storage, selfLinker) server := httptest.NewServer(handler) @@ -2229,7 +2229,7 @@ func TestUpdate(t *testing.T) { t: t, expectedSet: "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/namespaces/default/simple/" + ID, name: ID, - namespace: api.NamespaceDefault, + namespace: metav1.NamespaceDefault, } handler := handleLinker(storage, selfLinker) server := httptest.NewServer(handler) @@ -2275,7 +2275,7 @@ func TestUpdateInvokesAdmissionControl(t *testing.T) { item := &genericapitesting.Simple{ ObjectMeta: metav1.ObjectMeta{ Name: ID, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Other: "bar", } @@ -2446,7 +2446,7 @@ func TestUpdateMissing(t *testing.T) { item := &genericapitesting.Simple{ ObjectMeta: metav1.ObjectMeta{ Name: ID, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Other: "bar", } diff --git a/pkg/genericapiserver/endpoints/groupversion.go b/pkg/genericapiserver/endpoints/groupversion.go index 2d4d62b37d6..81d24cd530d 100644 --- a/pkg/genericapiserver/endpoints/groupversion.go +++ b/pkg/genericapiserver/endpoints/groupversion.go @@ -49,7 +49,7 @@ type APIGroupVersion struct { GroupVersion schema.GroupVersion // 
OptionsExternalVersion controls the Kubernetes APIVersion used for common objects in the apiserver - // schema like api.Status, api.DeleteOptions, and api.ListOptions. Other implementors may + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. If // empty, defaults to GroupVersion. OptionsExternalVersion *schema.GroupVersion diff --git a/pkg/genericapiserver/endpoints/handlers/BUILD b/pkg/genericapiserver/endpoints/handlers/BUILD index db25d627f88..64c17fce687 100644 --- a/pkg/genericapiserver/endpoints/handlers/BUILD +++ b/pkg/genericapiserver/endpoints/handlers/BUILD @@ -56,6 +56,7 @@ go_library( "//vendor:golang.org/x/net/websocket", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/runtime", diff --git a/pkg/genericapiserver/endpoints/watch_test.go b/pkg/genericapiserver/endpoints/watch_test.go index 163ad458213..2e3046ab426 100644 --- a/pkg/genericapiserver/endpoints/watch_test.go +++ b/pkg/genericapiserver/endpoints/watch_test.go @@ -421,28 +421,28 @@ func TestWatchParamParsing(t *testing.T) { resourceVersion: "1234", labelSelector: "", fieldSelector: "", - namespace: api.NamespaceAll, + namespace: metav1.NamespaceAll, }, { path: rootPath, rawQuery: "resourceVersion=314159&fieldSelector=Host%3D&labelSelector=name%3Dfoo", resourceVersion: "314159", labelSelector: "name=foo", fieldSelector: "Host=", - namespace: api.NamespaceAll, + namespace: metav1.NamespaceAll, }, { path: rootPath, rawQuery: "fieldSelector=id%3dfoo&resourceVersion=1492", resourceVersion: "1492", labelSelector: "", fieldSelector: "id=foo", - namespace: api.NamespaceAll, + namespace: metav1.NamespaceAll, }, { path: rootPath, rawQuery: "", 
resourceVersion: "", labelSelector: "", fieldSelector: "", - namespace: api.NamespaceAll, + namespace: metav1.NamespaceAll, }, { path: namespacedPath, diff --git a/pkg/genericapiserver/registry/generic/registry/BUILD b/pkg/genericapiserver/registry/generic/registry/BUILD index 0a379c334bf..7ed3d8a6ff6 100644 --- a/pkg/genericapiserver/registry/generic/registry/BUILD +++ b/pkg/genericapiserver/registry/generic/registry/BUILD @@ -32,6 +32,7 @@ go_library( "//vendor:golang.org/x/net/context", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", @@ -65,6 +66,7 @@ go_test( "//pkg/storage/testing:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/genericapiserver/registry/generic/registry/store_test.go b/pkg/genericapiserver/registry/generic/registry/store_test.go index 0e42a2f8e66..93ae07b6951 100644 --- a/pkg/genericapiserver/registry/generic/registry/store_test.go +++ b/pkg/genericapiserver/registry/generic/registry/store_test.go @@ -418,7 +418,7 @@ func TestNoOpUpdates(t *testing.T) { newPod := func() *api.Pod { return &api.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Labels: map[string]string{"prepare_create": "true"}, }, diff --git a/pkg/genericapiserver/registry/rest/BUILD b/pkg/genericapiserver/registry/rest/BUILD index 1440ab85e96..77dc82fda87 100644 --- a/pkg/genericapiserver/registry/rest/BUILD +++ b/pkg/genericapiserver/registry/rest/BUILD @@ -27,6 +27,7 @@ go_library( 
"//pkg/util/uuid:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", diff --git a/pkg/genericapiserver/registry/rest/create.go b/pkg/genericapiserver/registry/rest/create.go index db2a776fb18..f73621bd921 100644 --- a/pkg/genericapiserver/registry/rest/create.go +++ b/pkg/genericapiserver/registry/rest/create.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation/genericvalidation" "k8s.io/kubernetes/pkg/api/validation/path" ) @@ -72,7 +71,7 @@ func BeforeCreate(strategy RESTCreateStrategy, ctx genericapirequest.Context, ob return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") } } else { - objectMeta.Namespace = api.NamespaceNone + objectMeta.Namespace = metav1.NamespaceNone } objectMeta.DeletionTimestamp = nil objectMeta.DeletionGracePeriodSeconds = nil diff --git a/pkg/genericapiserver/registry/rest/resttest/BUILD b/pkg/genericapiserver/registry/rest/resttest/BUILD index a5b03bb211a..bb32202c29c 100644 --- a/pkg/genericapiserver/registry/rest/resttest/BUILD +++ b/pkg/genericapiserver/registry/rest/resttest/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/api/validation/path:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/conversion", "//vendor:k8s.io/apimachinery/pkg/fields", diff --git 
a/pkg/genericapiserver/registry/rest/resttest/resttest.go b/pkg/genericapiserver/registry/rest/resttest/resttest.go index 3b254d7a612..de98f881bbf 100644 --- a/pkg/genericapiserver/registry/rest/resttest/resttest.go +++ b/pkg/genericapiserver/registry/rest/resttest/resttest.go @@ -91,7 +91,7 @@ func (t *Tester) ReturnDeletedObject() *Tester { // Returns NamespaceNone for cluster-scoped objects. func (t *Tester) TestNamespace() string { if t.clusterScope { - return api.NamespaceNone + return metav1.NamespaceNone } return "test" } @@ -117,7 +117,7 @@ func (t *Tester) setObjectMeta(obj runtime.Object, name string) { meta := t.getObjectMetaOrFail(obj) meta.Name = name if t.clusterScope { - meta.Namespace = api.NamespaceNone + meta.Namespace = metav1.NamespaceNone } else { meta.Namespace = genericapirequest.NamespaceValue(t.TestContext()) } @@ -294,7 +294,7 @@ func (t *Tester) testCreateDiscardsObjectNamespace(valid runtime.Object) { } defer t.delete(t.TestContext(), created) createdObjectMeta := t.getObjectMetaOrFail(created) - if createdObjectMeta.Namespace != api.NamespaceNone { + if createdObjectMeta.Namespace != metav1.NamespaceNone { t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.Namespace) } } @@ -343,7 +343,7 @@ func (t *Tester) testCreateIgnoresContextNamespace(valid runtime.Object) { } defer t.delete(ctx, created) createdObjectMeta := t.getObjectMetaOrFail(created) - if createdObjectMeta.Namespace != api.NamespaceNone { + if createdObjectMeta.Namespace != metav1.NamespaceNone { t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.Namespace) } } @@ -362,7 +362,7 @@ func (t *Tester) testCreateIgnoresMismatchedNamespace(valid runtime.Object) { } defer t.delete(ctx, created) createdObjectMeta := t.getObjectMetaOrFail(created) - if createdObjectMeta.Namespace != api.NamespaceNone { + if createdObjectMeta.Namespace != metav1.NamespaceNone { t.Errorf("Expected empty namespace on created object, got 
'%v'", createdObjectMeta.Namespace) } } diff --git a/pkg/genericapiserver/registry/rest/update.go b/pkg/genericapiserver/registry/rest/update.go index f88022f3a19..3f3d8f9c469 100644 --- a/pkg/genericapiserver/registry/rest/update.go +++ b/pkg/genericapiserver/registry/rest/update.go @@ -89,7 +89,7 @@ func BeforeUpdate(strategy RESTUpdateStrategy, ctx genericapirequest.Context, ob return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") } } else { - objectMeta.Namespace = api.NamespaceNone + objectMeta.Namespace = metav1.NamespaceNone } // Ensure requests cannot update generation oldMeta, err := metav1.ObjectMetaFor(old) diff --git a/pkg/genericapiserver/server/genericapiserver.go b/pkg/genericapiserver/server/genericapiserver.go index b9ff0974a2d..a2edf94d9db 100644 --- a/pkg/genericapiserver/server/genericapiserver.go +++ b/pkg/genericapiserver/server/genericapiserver.go @@ -54,7 +54,7 @@ type APIGroupInfo struct { // Info about the resources in this group. Its a map from version to resource to the storage. VersionedResourcesStorageMap map[string]map[string]rest.Storage // OptionsExternalVersion controls the APIVersion used for common objects in the - // schema like api.Status, api.DeleteOptions, and api.ListOptions. Other implementors may + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. // If nil, defaults to groupMeta.GroupVersion. // TODO: Remove this when https://github.com/kubernetes/kubernetes/issues/19018 is fixed. 
diff --git a/pkg/genericapiserver/server/genericapiserver_test.go b/pkg/genericapiserver/server/genericapiserver_test.go index d5c4dd5848b..19a154dfc38 100644 --- a/pkg/genericapiserver/server/genericapiserver_test.go +++ b/pkg/genericapiserver/server/genericapiserver_test.go @@ -120,7 +120,7 @@ func TestInstallAPIGroups(t *testing.T) { scheme.AddKnownTypeWithName(gv.WithKind("Getter"), getter.New()) scheme.AddKnownTypeWithName(gv.WithKind("NoVerb"), noVerbs.New()) scheme.AddKnownTypes(v1.SchemeGroupVersion, - &v1.ListOptions{}, + &metav1.ListOptions{}, &v1.DeleteOptions{}, &metav1.ExportOptions{}, &metav1.Status{}, diff --git a/pkg/genericapiserver/server/options/server_run_options.go b/pkg/genericapiserver/server/options/server_run_options.go index 04a1f672189..b51f4ff2f40 100644 --- a/pkg/genericapiserver/server/options/server_run_options.go +++ b/pkg/genericapiserver/server/options/server_run_options.go @@ -21,6 +21,7 @@ import ( "net" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/kubernetes/pkg/api" @@ -219,7 +220,7 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "be excluded from maximum inflight request handling.") fs.MarkDeprecated("long-running-request-regexp", "regular expression matching of long-running requests is no longer supported") - deprecatedMasterServiceNamespace := api.NamespaceDefault + deprecatedMasterServiceNamespace := metav1.NamespaceDefault fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+ "DEPRECATED: the namespace from which the kubernetes master services should be injected into pods.") diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index a50078837dc..340203cf118 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -357,7 +358,7 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti } } for _, m := range nonNamespacedRESTMappings { - if err := p.prune(api.NamespaceNone, m, shortOutput); err != nil { + if err := p.prune(metav1.NamespaceNone, m, shortOutput); err != nil { return fmt.Errorf("error pruning nonNamespaced object %v: %v", m.GroupVersionKind, err) } } diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index cb9e78a6768..48555996afb 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -22,6 +22,7 @@ import ( "os" "strconv" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -73,7 +74,7 @@ func RunClusterInfo(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error mapper, typer := f.Object() cmdNamespace := cmdutil.GetFlagString(cmd, "namespace") if cmdNamespace == "" { - cmdNamespace = api.NamespaceSystem + cmdNamespace = metav1.NamespaceSystem } // TODO use generalized labels once they are implemented (#341) diff --git a/pkg/kubectl/cmd/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo_dump.go index 9af6ab0bd97..d37ed11bc4f 100644 --- a/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo_dump.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" @@ -96,7 +97,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - nodes, err := clientset.Core().Nodes().List(api.ListOptions{}) + nodes, err := clientset.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return err } @@ -107,7 +108,7 @@ func 
dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i var namespaces []string if cmdutil.GetFlagBool(cmd, "all-namespaces") { - namespaceList, err := clientset.Core().Namespaces().List(api.ListOptions{}) + namespaceList, err := clientset.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { return err } @@ -122,7 +123,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } namespaces = []string{ - api.NamespaceSystem, + metav1.NamespaceSystem, cmdNamespace, } } @@ -130,7 +131,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i for _, namespace := range namespaces { // TODO: this is repetitive in the extreme. Use reflection or // something to make this a for loop. - events, err := clientset.Core().Events(namespace).List(api.ListOptions{}) + events, err := clientset.Core().Events(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -138,7 +139,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - rcs, err := clientset.Core().ReplicationControllers(namespace).List(api.ListOptions{}) + rcs, err := clientset.Core().ReplicationControllers(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -146,7 +147,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - svcs, err := clientset.Core().Services(namespace).List(api.ListOptions{}) + svcs, err := clientset.Core().Services(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -154,7 +155,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - sets, err := clientset.Extensions().DaemonSets(namespace).List(api.ListOptions{}) + sets, err := clientset.Extensions().DaemonSets(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -162,7 +163,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out 
i return err } - deps, err := clientset.Extensions().Deployments(namespace).List(api.ListOptions{}) + deps, err := clientset.Extensions().Deployments(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -170,7 +171,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - rps, err := clientset.Extensions().ReplicaSets(namespace).List(api.ListOptions{}) + rps, err := clientset.Extensions().ReplicaSets(namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -178,7 +179,7 @@ func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, args []string, out i return err } - pods, err := clientset.Core().Pods(namespace).List(api.ListOptions{}) + pods, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 65c29e3addb..6f951db3ffb 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -381,8 +381,8 @@ func (ps podStatuses) Message() string { // getPodsForDeletion returns all the pods we're going to delete. If there are // any pods preventing us from deleting, we return that list in an error. 
func (o *DrainOptions) getPodsForDeletion() (pods []api.Pod, err error) { - podList, err := o.client.Core().Pods(api.NamespaceAll).List(api.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": o.nodeInfo.Name})}) + podList, err := o.client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": o.nodeInfo.Name}).String()}) if err != nil { return pods, err } diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 97e3bd7f5c4..db4fd7d8bab 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -377,7 +377,7 @@ func contains(resourcesList []*metav1.APIResourceList, resource schema.GroupVers // waitForPod watches the given pod until the exitCondition is true func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc) (*api.Pod, error) { - w, err := podClient.Pods(ns).Watch(api.SingleObject(metav1.ObjectMeta{Name: name})) + w, err := podClient.Pods(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: name})) if err != nil { return nil, err } diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go index 6e47a672c64..ae0ed5a1c84 100644 --- a/pkg/kubectl/cmd/top_node.go +++ b/pkg/kubectl/cmd/top_node.go @@ -151,8 +151,8 @@ func (o TopNodeOptions) RunTopNode() error { } nodes = append(nodes, *node) } else { - nodeList, err := o.NodeClient.Nodes().List(api.ListOptions{ - LabelSelector: selector, + nodeList, err := o.NodeClient.Nodes().List(metav1.ListOptions{ + LabelSelector: selector.String(), }) if err != nil { return err diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index fa905d23389..84954796251 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -164,8 +164,8 @@ func verifyEmptyMetrics(o TopPodOptions, selector labels.Selector) error { return err } } else { - pods, err := o.PodClient.Pods(o.Namespace).List(api.ListOptions{ - LabelSelector: selector, 
+ pods, err := o.PodClient.Pods(o.Namespace).List(metav1.ListOptions{ + LabelSelector: selector.String(), }) if err != nil { return err diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 53778cc5fb4..42225c561e4 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -34,6 +34,7 @@ import ( "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -279,7 +280,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) Factory { // GetFirstPod returns a pod matching the namespace and label selector // and the number of all pods that match the label selector. func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*api.Pod, int, error) { - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := client.Pods(namespace).List(options) if err != nil { diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index a75ae9875a0..7929e83a8ac 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -439,7 +439,7 @@ func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *ap newPod := api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("pod-%d", i+1), - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC), Labels: labels, }, @@ -485,7 +485,7 @@ func TestGetFirstPod(t *testing.T) { expected: &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-1", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), 
Labels: map[string]string{"test": "selector"}, }, @@ -507,7 +507,7 @@ func TestGetFirstPod(t *testing.T) { expected: &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-2", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, @@ -530,7 +530,7 @@ func TestGetFirstPod(t *testing.T) { expected: &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-1", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, @@ -554,7 +554,7 @@ func TestGetFirstPod(t *testing.T) { Object: &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-1", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, @@ -573,7 +573,7 @@ func TestGetFirstPod(t *testing.T) { expected: &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-1", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, @@ -607,7 +607,7 @@ func TestGetFirstPod(t *testing.T) { } selector := labels.Set(labelSet).AsSelector() - pod, numPods, err := GetFirstPod(fake.Core(), api.NamespaceDefault, selector, 1*time.Minute, test.sortBy) + pod, numPods, err := GetFirstPod(fake.Core(), metav1.NamespaceDefault, selector, 1*time.Minute, test.sortBy) pod.Spec.SecurityContext = nil if !test.expectedErr && err != nil { t.Errorf("%s: unexpected error: %v", test.name, err) diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 88141f09762..5faaa6812fd 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -202,7 +202,7 @@ func (d *NamespaceDescriber) 
Describe(namespace, name string, describerSettings if err != nil { return "", err } - resourceQuotaList, err := d.Core().ResourceQuotas(name).List(api.ListOptions{}) + resourceQuotaList, err := d.Core().ResourceQuotas(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support resource quotas. @@ -212,7 +212,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return "", err } } - limitRangeList, err := d.Core().LimitRanges(name).List(api.ListOptions{}) + limitRangeList, err := d.Core().LimitRanges(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support limit ranges. @@ -472,7 +472,7 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings Descri if describerSettings.ShowEvents { eventsInterface := d.Core().Events(namespace) selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) - options := api.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector.String()} events, err2 := eventsInterface.List(options) if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { return tabbedString(func(out io.Writer) error { @@ -1559,7 +1559,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet ServiceName: "default-http-backend", ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, } - ns = api.NamespaceSystem + ns = metav1.NamespaceSystem } w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def)) if len(ing.Spec.TLS) != 0 { @@ -1795,7 +1795,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett // missingSecrets is the set of all secrets present in the // serviceAccount but not present in the set of existing secrets. 
missingSecrets := sets.NewString() - secrets, err := d.Core().Secrets(namespace).List(api.ListOptions{}) + secrets, err := d.Core().Secrets(namespace).List(metav1.ListOptions{}) // errors are tolerated here in order to describe the serviceAccount with all // of the secrets that it references, even if those secrets cannot be fetched. @@ -1904,7 +1904,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings Descr // in a policy aware setting, users may have access to a node, but not all pods // in that case, we note that the user does not have access to the pods canViewPods := true - nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(api.ListOptions{FieldSelector: fieldSelector}) + nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { if !errors.IsForbidden(err) { return "", err @@ -2346,7 +2346,7 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting func getDaemonSetsForLabels(c extensionsclient.DaemonSetInterface, labelsToMatch labels.Labels) ([]extensions.DaemonSet, error) { // Get all daemon sets // TODO: this needs a namespace scope as argument - dss, err := c.List(api.ListOptions{}) + dss, err := c.List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("error getting daemon set: %v", err) } @@ -2395,7 +2395,7 @@ func printReplicaSetsByLabels(matchingRSs []*versionedextension.ReplicaSet) stri } func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) { - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} rcPods, err := c.List(options) if err != nil { return diff --git a/pkg/kubectl/metricsutil/metrics_client.go b/pkg/kubectl/metricsutil/metrics_client.go index ed577453aeb..e72f97dd049 100644 --- a/pkg/kubectl/metricsutil/metrics_client.go +++ 
b/pkg/kubectl/metricsutil/metrics_client.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" ) @@ -68,7 +68,7 @@ func DefaultHeapsterMetricsClient(svcClient coreclient.ServicesGetter) *Heapster } func podMetricsUrl(namespace string, name string) (string, error) { - if namespace == api.NamespaceAll { + if namespace == metav1.NamespaceAll { return fmt.Sprintf("%s/pods", metricsRoot), nil } errs := validation.ValidateNamespaceName(namespace, false) @@ -128,7 +128,7 @@ func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector label func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metricsapi.PodMetrics, error) { if allNamespaces { - namespace = api.NamespaceAll + namespace = metav1.NamespaceAll } path, err := podMetricsUrl(namespace, podName) if err != nil { diff --git a/pkg/kubectl/resource/builder.go b/pkg/kubectl/resource/builder.go index 0b56a8db9e7..6a12058d415 100644 --- a/pkg/kubectl/resource/builder.go +++ b/pkg/kubectl/resource/builder.go @@ -24,12 +24,12 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation" ) @@ -291,11 +291,11 @@ func (b *Builder) DefaultNamespace() *Builder { return b } -// AllNamespaces instructs the builder to use NamespaceAll as a namespace to request resources +// AllNamespaces instructs 
the builder to use metav1.NamespaceAll as a namespace to request resources // across all of the namespace. This overrides the namespace set by NamespaceParam(). func (b *Builder) AllNamespaces(allNamespace bool) *Builder { if allNamespace { - b.namespace = api.NamespaceAll + b.namespace = metav1.NamespaceAll } b.allNamespace = allNamespace return b } diff --git a/pkg/kubectl/resource/visitor.go b/pkg/kubectl/resource/visitor.go index 8aed50e1911..32102d4ed17 100644 --- a/pkg/kubectl/resource/visitor.go +++ b/pkg/kubectl/resource/visitor.go @@ -31,13 +31,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation" ) @@ -116,7 +116,7 @@ func (i *Info) Visit(fn VisitorFunc) error { func (i *Info) Get() (err error) { obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export) if err != nil { - if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != api.NamespaceDefault && i.Namespace != api.NamespaceAll { + if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll { err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do().Error() if err2 != nil && errors.IsNotFound(err2) { return err2 diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index d86695667a3..16c174f0f49 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -23,6 +23,7 @@ import ( "os/signal" "syscall" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" @@ -73,7 +74,7 @@ func (r *DeploymentRollbacker) 
Rollback(obj runtime.Object, updatedAnnotations m result := "" // Get current events - events, err := r.c.Core().Events(d.Namespace).List(api.ListOptions{}) + events, err := r.c.Core().Events(d.Namespace).List(metav1.ListOptions{}) if err != nil { return result, err } @@ -82,7 +83,7 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m return result, err } // Watch for the changes of events - watch, err := r.c.Core().Events(d.Namespace).Watch(api.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) + watch, err := r.c.Core().Events(d.Namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) if err != nil { return result, err } diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index f782cb2605c..0e798f47c8a 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -415,7 +415,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR for i := range controllers { controller := controllers[i] selector := labels.Set(controller.Spec.Selector).AsSelector() - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := r.podClient.Pods(controller.Namespace).List(options) if err != nil { return 0, 0, err @@ -708,7 +708,7 @@ func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, r // Update all pods managed by the rc to have the new hash label, so they are correctly adopted // TODO: extract the code from the label command and re-use it here. 
selector := labels.SelectorFromSet(oldRc.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := podClient.Pods(namespace).List(options) if err != nil { return nil, err @@ -749,7 +749,7 @@ func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, r // doesn't see the update to its pod template and creates a new pod with the old labels after // we've finished re-adopting existing pods to the rc. selector = labels.SelectorFromSet(selectorCopy) - options = api.ListOptions{LabelSelector: selector} + options = metav1.ListOptions{LabelSelector: selector.String()} podList, err = podClient.Pods(namespace).List(options) for ix := range podList.Items { pod := &podList.Items[ix] @@ -830,7 +830,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod } func FindSourceController(r coreclient.ReplicationControllersGetter, namespace, name string) (*api.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(api.ListOptions{}) + list, err := r.ReplicationControllers(namespace).List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 9a364acdeee..b6934a752e5 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -45,7 +45,7 @@ import ( func oldRc(replicas int, original int) *api.ReplicationController { return &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo-v1", UID: "7764ae47-9092-11e4-8393-42010af018ff", Annotations: map[string]string{ @@ -78,7 +78,7 @@ func newRc(replicas int, desired int) *api.ReplicationController { } rc.Spec.Selector = map[string]string{"version": "v2"} rc.ObjectMeta = metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, 
Name: "foo-v2", Annotations: map[string]string{ desiredReplicasAnnotation: fmt.Sprintf("%d", desired), @@ -949,7 +949,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { { oldRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: api.ReplicationControllerSpec{ @@ -979,7 +979,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { }, newRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: api.ReplicationControllerSpec{ @@ -1014,7 +1014,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { { oldRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "bar", }, Spec: api.ReplicationControllerSpec{ @@ -1040,7 +1040,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { }, newRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "bar", }, Spec: api.ReplicationControllerSpec{ @@ -1085,7 +1085,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { test.newRc.Name = fmt.Sprintf("%s-%s", test.newRc.Name, deploymentHash) config := &NewControllerConfig{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, OldName: test.oldRc.ObjectMeta.Name, NewName: test.newRc.ObjectMeta.Name, Image: test.image, @@ -1229,7 +1229,7 @@ func TestRollingUpdater_cleanupWithClients_Rename(t *testing.T) { func TestFindSourceController(t *testing.T) { ctrl1 := api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Annotations: map[string]string{ sourceIdAnnotation: "bar:1234", @@ -1238,7 +1238,7 @@ func TestFindSourceController(t *testing.T) { } 
ctrl2 := api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "bar", Annotations: map[string]string{ sourceIdAnnotation: "foo:12345", @@ -1247,7 +1247,7 @@ func TestFindSourceController(t *testing.T) { } ctrl3 := api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "baz", Annotations: map[string]string{ sourceIdAnnotation: "baz:45667", @@ -1329,7 +1329,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { { rc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: api.ReplicationControllerSpec{ @@ -1342,7 +1342,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { expectedRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Annotations: map[string]string{ "kubectl.kubernetes.io/next-controller-id": "foo", @@ -1365,7 +1365,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { { rc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: api.ReplicationControllerSpec{ @@ -1387,7 +1387,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { expectedRc: &api.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Annotations: map[string]string{ "kubectl.kubernetes.io/next-controller-id": "foo", @@ -1597,7 +1597,7 @@ func TestAddDeploymentHash(t *testing.T) { restClient.Client = fakeClient.Client clientset := internalclientset.New(restClient) - if _, err := AddDeploymentKeyToReplicationController(rc, clientset.Core(), clientset.Core(), "dk", "hash", api.NamespaceDefault, buf); err != nil { 
+ if _, err := AddDeploymentKeyToReplicationController(rc, clientset.Core(), clientset.Core(), "dk", "hash", metav1.NamespaceDefault, buf); err != nil { t.Errorf("unexpected error: %v", err) } for _, pod := range podList.Items { @@ -1625,7 +1625,7 @@ func TestRollingUpdater_readyPods(t *testing.T) { } return &api.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: fmt.Sprintf("pod-%d", count), Labels: labels, }, diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index f1f2d86ab82..ebacf34ca22 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -224,8 +224,8 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize return err } if !checkRC(currentRC) { - watchOptions := api.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("metadata.name", name), + watchOptions := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), ResourceVersion: updatedResourceVersion, } watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions) diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index 14871b5c9a5..6ea2b554244 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -68,7 +68,7 @@ func TestReplicationControllerScaleRetry(t *testing.T) { preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" - namespace := api.NamespaceDefault + namespace := metav1.NamespaceDefault scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) pass, err := scaleFunc() @@ -127,7 +127,7 @@ func TestReplicationControllerScale(t *testing.T) { func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { fake := fake.NewSimpleClientset(&api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Spec: 
api.ReplicationControllerSpec{ Replicas: 10, }, @@ -328,7 +328,7 @@ func TestJobScaleRetry(t *testing.T) { func job() *batch.Job { return &batch.Job{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, } @@ -377,7 +377,7 @@ func TestJobScaleFailsPreconditions(t *testing.T) { ten := int32(10) fake := fake.NewSimpleClientset(&batch.Job{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: batch.JobSpec{ @@ -590,7 +590,7 @@ func TestDeploymentScaleRetry(t *testing.T) { func deployment() *extensions.Deployment { return &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, } @@ -638,7 +638,7 @@ func TestDeploymentScaleInvalid(t *testing.T) { func TestDeploymentScaleFailsPreconditions(t *testing.T) { fake := fake.NewSimpleClientset(&extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Spec: extensions.DeploymentSpec{ diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index 6fc1418ee97..ab14c3f96aa 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -142,7 +142,7 @@ type objInterface interface { // getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) { - rcs, err := rcClient.List(api.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("error getting replication controllers: %v", err) } @@ -346,7 +346,7 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat // StatefulSet should track generation number. 
pods := reaper.podClient.Pods(namespace) selector, _ := metav1.LabelSelectorAsSelector(ps.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := pods.List(options) if err != nil { return err @@ -391,7 +391,7 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra } // at this point only dead pods are left, that should be removed selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := pods.List(options) if err != nil { return err @@ -446,7 +446,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati return err } - options := api.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector.String()} rsList, err := replicaSets.List(options) if err != nil { return err diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go index 6c23ab246bc..dcaf8d6bbc2 100644 --- a/pkg/kubectl/stop_test.go +++ b/pkg/kubectl/stop_test.go @@ -570,11 +570,11 @@ func (c *reaperCoreFake) Services(namespace string) coreclient.ServiceInterface } func pod() *api.Pod { - return &api.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}} + return &api.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}} } func service() *api.Service { - return &api.Service{ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}} + return &api.Service{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}} } func TestSimpleStop(t *testing.T) { @@ -591,8 +591,8 @@ func TestSimpleStop(t *testing.T) { }, kind: api.Kind("Pod"), actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("pods").WithVersion(""), api.NamespaceDefault, "foo"), - 
testcore.NewDeleteAction(api.Resource("pods").WithVersion(""), api.NamespaceDefault, "foo"), + testcore.NewGetAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), + testcore.NewDeleteAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), }, expectError: false, test: "stop pod succeeds", @@ -603,8 +603,8 @@ func TestSimpleStop(t *testing.T) { }, kind: api.Kind("Service"), actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("services").WithVersion(""), api.NamespaceDefault, "foo"), - testcore.NewDeleteAction(api.Resource("services").WithVersion(""), api.NamespaceDefault, "foo"), + testcore.NewGetAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), + testcore.NewDeleteAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), }, expectError: false, test: "stop service succeeds", @@ -626,7 +626,7 @@ func TestSimpleStop(t *testing.T) { }, kind: api.Kind("Service"), actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("services").WithVersion(""), api.NamespaceDefault, "foo"), + testcore.NewGetAction(api.Resource("services").WithVersion(""), metav1.NamespaceDefault, "foo"), }, expectError: true, test: "stop service fails, can't delete", diff --git a/pkg/kubelet/config/apiserver.go b/pkg/kubelet/config/apiserver.go index 1219738e1dd..e35e595bc1b 100644 --- a/pkg/kubelet/config/apiserver.go +++ b/pkg/kubelet/config/apiserver.go @@ -18,6 +18,7 @@ limitations under the License. package config import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api" @@ -29,7 +30,7 @@ import ( // NewSourceApiserver creates a config source that watches and pulls from the apiserver. 
func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) { - lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", v1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) + lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) newSourceApiserverFromLW(lw, updates) } diff --git a/pkg/kubelet/config/apiserver_test.go b/pkg/kubelet/config/apiserver_test.go index 17c4054f7d2..55d8137bab7 100644 --- a/pkg/kubelet/config/apiserver_test.go +++ b/pkg/kubelet/config/apiserver_test.go @@ -33,11 +33,11 @@ type fakePodLW struct { watchResp watch.Interface } -func (lw fakePodLW) List(options v1.ListOptions) (runtime.Object, error) { +func (lw fakePodLW) List(options metav1.ListOptions) (runtime.Object, error) { return lw.listResp, nil } -func (lw fakePodLW) Watch(options v1.ListOptions) (watch.Interface, error) { +func (lw fakePodLW) Watch(options metav1.ListOptions) (watch.Interface, error) { return lw.watchResp, nil } diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 82c499a737f..d53751bf3ad 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -22,6 +22,7 @@ import ( "encoding/hex" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilyaml "k8s.io/apimachinery/pkg/util/yaml" @@ -57,7 +58,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) if pod.Namespace == "" { - pod.Namespace = kubetypes.NamespaceDefault + pod.Namespace = metav1.NamespaceDefault } glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) @@ -80,7 +81,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node func 
getSelfLink(name, namespace string) string { var selfLink string if len(namespace) == 0 { - namespace = api.NamespaceDefault + namespace = metav1.NamespaceDefault } selfLink = fmt.Sprintf("/api/"+api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version+"/pods/namespaces/%s/%s", name, namespace) return selfLink diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index 8c38bddf6ff..72b8c155e6a 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -232,7 +232,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { Name: "foo" + "-" + nodeName, Namespace: "default", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, - SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault), + SelfLink: getSelfLink("foo-"+nodeName, metav1.NamespaceDefault), }, Spec: v1.PodSpec{ NodeName: nodeName, @@ -260,7 +260,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { Name: "bar" + "-" + nodeName, Namespace: "default", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"}, - SelfLink: getSelfLink("bar-"+nodeName, kubetypes.NamespaceDefault), + SelfLink: getSelfLink("bar-"+nodeName, metav1.NamespaceDefault), }, Spec: v1.PodSpec{ NodeName: nodeName, diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index d114eea1504..86b8255ffbc 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -34,6 +34,7 @@ import ( clientgoclientset "k8s.io/client-go/kubernetes" cadvisorapi "github.com/google/cadvisor/info/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -381,7 +382,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub serviceStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) if kubeClient != nil { - serviceLW := 
cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", v1.NamespaceAll, fields.Everything()) + serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()) cache.NewReflector(serviceLW, &v1.Service{}, serviceStore, 0).Run() } serviceLister := &listers.StoreToServiceLister{Indexer: serviceStore} @@ -389,7 +390,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc) if kubeClient != nil { fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector() - nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fieldSelector) + nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) cache.NewReflector(nodeLW, &v1.Node{}, nodeStore, 0).Run() } nodeLister := &listers.StoreToNodeLister{Store: nodeStore} diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index ea73e8125aa..a1c0778c547 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -258,7 +258,7 @@ func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Ser func TestMakeEnvironmentVariables(t *testing.T) { services := []*v1.Service{ - buildService("kubernetes", v1.NamespaceDefault, "1.2.3.1", "TCP", 8081), + buildService("kubernetes", metav1.NamespaceDefault, "1.2.3.1", "TCP", 8081), buildService("test", "test1", "1.2.3.3", "TCP", 8083), buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084), buildService("test", "test2", "1.2.3.5", "TCP", 8085), @@ -296,7 +296,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, }, }, - masterServiceNs: v1.NamespaceDefault, + masterServiceNs: metav1.NamespaceDefault, nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ 
-331,7 +331,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, }, }, - masterServiceNs: v1.NamespaceDefault, + masterServiceNs: metav1.NamespaceDefault, nilLister: true, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ -352,7 +352,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { {Name: "FOO", Value: "BAZ"}, }, }, - masterServiceNs: v1.NamespaceDefault, + masterServiceNs: metav1.NamespaceDefault, nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAZ"}, diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 0ffcd416d6d..2b1b226c73d 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -159,7 +159,7 @@ func newTestKubeletWithImageList( t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err) } kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true }) - kubelet.masterServiceNamespace = v1.NamespaceDefault + kubelet.masterServiceNamespace = metav1.NamespaceDefault kubelet.serviceLister = testServiceLister{} kubelet.nodeLister = testNodeLister{} kubelet.nodeInfo = testNodeInfo{} diff --git a/pkg/kubelet/network/hostport/hostport_test.go b/pkg/kubelet/network/hostport/hostport_test.go index 4c131c05349..84932305d01 100644 --- a/pkg/kubelet/network/hostport/hostport_test.go +++ b/pkg/kubelet/network/hostport/hostport_test.go @@ -72,7 +72,7 @@ func TestOpenPodHostports(t *testing.T) { &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pod", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ @@ -127,7 +127,7 @@ func TestOpenPodHostports(t *testing.T) { &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "another-test-pod", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ diff --git a/pkg/kubelet/pod/pod_manager_test.go 
b/pkg/kubelet/pod/pod_manager_test.go index f87a0836dc4..06dab024b5d 100644 --- a/pkg/kubelet/pod/pod_manager_test.go +++ b/pkg/kubelet/pod/pod_manager_test.go @@ -118,7 +118,7 @@ func TestDeletePods(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ UID: types.UID("mirror-pod-uid"), Name: "mirror-static-pod-name", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ kubetypes.ConfigSourceAnnotationKey: "api", kubetypes.ConfigMirrorAnnotationKey: "mirror", @@ -129,7 +129,7 @@ func TestDeletePods(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ UID: types.UID("static-pod-uid"), Name: "mirror-static-pod-name", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}, }, } @@ -139,7 +139,7 @@ func TestDeletePods(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ UID: types.UID("extra-pod-uid"), Name: "extra-pod-name", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "api"}, }, }, diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 9b95c9f7baf..15f17763e93 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -52,7 +52,6 @@ import ( kubecontainertesting "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" "k8s.io/kubernetes/pkg/kubelet/server/stats" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/httpstream" "k8s.io/kubernetes/pkg/util/httpstream/spdy" "k8s.io/kubernetes/pkg/util/term" @@ -265,7 +264,7 @@ func readResp(resp *http.Response) (string, error) { // A helper function to return the correct pod name. 
func getPodName(name, namespace string) string { if namespace == "" { - namespace = kubetypes.NamespaceDefault + namespace = metav1.NamespaceDefault } return name + "_" + namespace } diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 43ddf2b2e08..117f739bebb 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -29,6 +29,7 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/emicklei/go-restful" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -197,7 +198,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful // Default parameters. params := map[string]string{ - "namespace": v1.NamespaceDefault, + "namespace": metav1.NamespaceDefault, "uid": "", } for k, v := range request.PathParameters() { diff --git a/pkg/kubelet/types/pod_update.go b/pkg/kubelet/types/pod_update.go index 2d8f20a5179..c91077c1c23 100644 --- a/pkg/kubelet/types/pod_update.go +++ b/pkg/kubelet/types/pod_update.go @@ -19,6 +19,7 @@ package types import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" ) @@ -64,7 +65,7 @@ const ( // Updates from all sources AllSource = "*" - NamespaceDefault = v1.NamespaceDefault + NamespaceDefault = metav1.NamespaceDefault ) // PodUpdate defines an operation sent on the channel. 
You can add or remove single services by diff --git a/pkg/kubelet/util/csr/csr.go b/pkg/kubelet/util/csr/csr.go index 8390e796ce5..4d0a38dab37 100644 --- a/pkg/kubelet/util/csr/csr.go +++ b/pkg/kubelet/util/csr/csr.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" certutil "k8s.io/client-go/pkg/util/cert" - "k8s.io/kubernetes/pkg/api/v1" certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" certificatesclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1" ) diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index 949895968d3..c80386c5506 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -19,6 +19,7 @@ package kubemark import ( "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/componentconfig" @@ -116,7 +117,7 @@ func GetHollowKubeletConfig( c.Address = "0.0.0.0" /* bind address */ c.Port = int32(kubeletPort) c.ReadOnlyPort = int32(kubeletReadOnlyPort) - c.MasterServiceNamespace = api.NamespaceDefault + c.MasterServiceNamespace = metav1.NamespaceDefault c.PodManifestPath = manifestFilePath c.FileCheckFrequency.Duration = 20 * time.Second c.HTTPCheckFrequency.Duration = 20 * time.Second diff --git a/pkg/master/controller.go b/pkg/master/controller.go index 631dea6e204..51cdb02c3f3 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -84,7 +84,7 @@ func (c *Config) NewBootstrapController(legacyRESTStorage corerest.LegacyRESTSto EndpointReconciler: c.EndpointReconcilerConfig.Reconciler, EndpointInterval: c.EndpointReconcilerConfig.Interval, - SystemNamespaces: []string{api.NamespaceSystem}, + SystemNamespaces: []string{metav1.NamespaceSystem}, SystemNamespacesInterval: 1 * time.Minute, ServiceClusterIPRegistry: legacyRESTStorage.ServiceClusterIPAllocator, @@ -169,7 +169,7 @@ func (c 
*Controller) UpdateKubernetesService(reconcile bool) error { // TODO: when it becomes possible to change this stuff, // stop polling and start watching. // TODO: add endpoints of all replicas, not just the elected master. - if err := c.CreateNamespaceIfNeeded(api.NamespaceDefault); err != nil { + if err := c.CreateNamespaceIfNeeded(metav1.NamespaceDefault); err != nil { return err } @@ -238,12 +238,12 @@ func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndp // CreateMasterServiceIfNeeded will create the specified service if it // doesn't already exist. func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error { - if s, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil { + if s, err := c.ServiceClient.Services(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil { // The service already exists. 
if reconcile { if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { glog.Warningf("Resetting master service %q to %#v", serviceName, svc) - _, err := c.ServiceClient.Services(api.NamespaceDefault).Update(svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(svc) return err } } @@ -252,7 +252,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser svc := &api.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{"provider": "kubernetes", "component": "apiserver"}, }, Spec: api.ServiceSpec{ @@ -265,7 +265,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser }, } - _, err := c.ServiceClient.Services(api.NamespaceDefault).Create(svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(svc) if errors.IsAlreadyExists(err) { return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile) } @@ -318,12 +318,12 @@ func NewMasterCountEndpointReconciler(masterCount int, endpointClient coreclient // to be running (c.masterCount). // * ReconcileEndpoints is called periodically from all apiservers. 
func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { - e, err := r.endpointClient.Endpoints(api.NamespaceDefault).Get(serviceName, metav1.GetOptions{}) + e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}) if err != nil { e = &api.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, } } @@ -333,7 +333,7 @@ func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, i Addresses: []api.EndpointAddress{{IP: ip.String()}}, Ports: endpointPorts, }} - _, err = r.endpointClient.Endpoints(api.NamespaceDefault).Create(e) + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Create(e) return err } @@ -347,7 +347,7 @@ func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, i Ports: endpointPorts, }} glog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e) - _, err = r.endpointClient.Endpoints(api.NamespaceDefault).Update(e) + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) return err } if ipCorrect && portsCorrect { @@ -383,7 +383,7 @@ func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, i e.Subsets[0].Ports = endpointPorts } glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e) - _, err = r.endpointClient.Endpoints(api.NamespaceDefault).Update(e) + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) return err } diff --git a/pkg/master/controller_test.go b/pkg/master/controller_test.go index e6731465196..1266b9c22c9 100644 --- a/pkg/master/controller_test.go +++ b/pkg/master/controller_test.go @@ -29,7 +29,7 @@ import ( ) func TestReconcileEndpoints(t *testing.T) { - ns := api.NamespaceDefault + ns := metav1.NamespaceDefault om := func(name string) metav1.ObjectMeta { return 
metav1.ObjectMeta{Namespace: ns, Name: name} } @@ -542,7 +542,7 @@ func TestReconcileEndpoints(t *testing.T) { } func TestCreateOrUpdateMasterService(t *testing.T) { - ns := api.NamespaceDefault + ns := metav1.NamespaceDefault om := func(name string) metav1.ObjectMeta { return metav1.ObjectMeta{Namespace: ns, Name: name} } diff --git a/pkg/master/master.go b/pkg/master/master.go index dd3797bbc15..d0ded1e672f 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -24,6 +24,7 @@ import ( "strconv" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/server/healthz" @@ -368,7 +369,7 @@ func (n nodeAddressProvider) externalAddresses() ([]string, error) { apiv1.NodeExternalIP, apiv1.NodeLegacyHostIP, } - nodes, err := n.nodeClient.List(apiv1.ListOptions{}) + nodes, err := n.nodeClient.List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index f4fa55448d7..12f443ad549 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -188,14 +188,14 @@ func TestGetNodeAddresses(t *testing.T) { addressProvider := nodeAddressProvider{fakeNodeClient} // Fail case (no addresses associated with nodes) - nodes, _ := fakeNodeClient.List(apiv1.ListOptions{}) + nodes, _ := fakeNodeClient.List(metav1.ListOptions{}) addrs, err := addressProvider.externalAddresses() assert.Error(err, "addresses should have caused an error as there are no addresses.") assert.Equal([]string(nil), addrs) // Pass case with External type IP - nodes, _ = fakeNodeClient.List(apiv1.ListOptions{}) + nodes, _ = fakeNodeClient.List(metav1.ListOptions{}) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} fakeNodeClient.Update(&nodes.Items[index]) @@ -205,7 +205,7 @@ func TestGetNodeAddresses(t *testing.T) { 
assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs) // Pass case with LegacyHost type IP - nodes, _ = fakeNodeClient.List(apiv1.ListOptions{}) + nodes, _ = fakeNodeClient.List(metav1.ListOptions{}) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeLegacyHostIP, Address: "127.0.0.2"}} fakeNodeClient.Update(&nodes.Items[index]) diff --git a/pkg/metrics/BUILD b/pkg/metrics/BUILD index e535d2f3148..78c3c4a6b00 100644 --- a/pkg/metrics/BUILD +++ b/pkg/metrics/BUILD @@ -20,13 +20,13 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/util/system:go_default_library", "//vendor:github.com/golang/glog", "//vendor:github.com/prometheus/common/expfmt", "//vendor:github.com/prometheus/common/model", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", ], ) diff --git a/pkg/metrics/metrics_grabber.go b/pkg/metrics/metrics_grabber.go index 6095963e855..0686d092d71 100644 --- a/pkg/metrics/metrics_grabber.go +++ b/pkg/metrics/metrics_grabber.go @@ -20,9 +20,9 @@ import ( "fmt" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util/system" @@ -54,7 +54,7 @@ type MetricsGrabber struct { func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) { registeredMaster := false masterName := "" - nodeList, err := c.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -86,7 +86,7 @@ func NewMetricsGrabber(c clientset.Interface, kubelets 
bool, scheduler bool, con } func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { - nodes, err := g.client.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) + nodes, err := g.client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) if err != nil { return KubeletMetrics{}, err } @@ -112,7 +112,7 @@ func (g *MetricsGrabber) GrabFromScheduler() (SchedulerMetrics, error) { if !g.registeredMaster { return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering.") } - output, err := g.getMetricsFromPod(fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), api.NamespaceSystem, ports.SchedulerPort) + output, err := g.getMetricsFromPod(fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.SchedulerPort) if err != nil { return SchedulerMetrics{}, err } @@ -123,7 +123,7 @@ func (g *MetricsGrabber) GrabFromControllerManager() (ControllerManagerMetrics, if !g.registeredMaster { return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. 
Skipping ControllerManager's metrics gathering.") } - output, err := g.getMetricsFromPod(fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), api.NamespaceSystem, ports.ControllerManagerPort) + output, err := g.getMetricsFromPod(fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.ControllerManagerPort) if err != nil { return ControllerManagerMetrics{}, err } @@ -167,7 +167,7 @@ func (g *MetricsGrabber) Grab() (MetricsCollection, error) { } if g.grabFromKubelets { result.KubeletMetrics = make(map[string]KubeletMetrics) - nodes, err := g.client.Core().Nodes().List(v1.ListOptions{}) + nodes, err := g.client.Core().Nodes().List(metav1.ListOptions{}) if err != nil { errs = append(errs, err) } else { diff --git a/pkg/proxy/config/BUILD b/pkg/proxy/config/BUILD index 108a8cdc047..650fc1b20e2 100644 --- a/pkg/proxy/config/BUILD +++ b/pkg/proxy/config/BUILD @@ -34,7 +34,6 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/cache:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", diff --git a/pkg/proxy/config/api.go b/pkg/proxy/config/api.go index 38b3eb14599..9c55e1931bf 100644 --- a/pkg/proxy/config/api.go +++ b/pkg/proxy/config/api.go @@ -19,6 +19,7 @@ package config import ( "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" @@ -26,10 +27,10 @@ import ( // NewSourceAPI creates config source that watches for changes to the services and endpoints. 
func NewSourceAPI(c cache.Getter, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) { - servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything()) + servicesLW := cache.NewListWatchFromClient(c, "services", metav1.NamespaceAll, fields.Everything()) cache.NewReflector(servicesLW, &api.Service{}, NewServiceStore(nil, servicesChan), period).Run() - endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything()) + endpointsLW := cache.NewListWatchFromClient(c, "endpoints", metav1.NamespaceAll, fields.Everything()) cache.NewReflector(endpointsLW, &api.Endpoints{}, NewEndpointsStore(nil, endpointsChan), period).Run() } diff --git a/pkg/proxy/config/api_test.go b/pkg/proxy/config/api_test.go index f0f704f2ff1..b55917bc990 100644 --- a/pkg/proxy/config/api_test.go +++ b/pkg/proxy/config/api_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" ) @@ -33,11 +32,11 @@ type fakeLW struct { watchResp watch.Interface } -func (lw fakeLW) List(options v1.ListOptions) (runtime.Object, error) { +func (lw fakeLW) List(options metav1.ListOptions) (runtime.Object, error) { return lw.listResp, nil } -func (lw fakeLW) Watch(options v1.ListOptions) (watch.Interface, error) { +func (lw fakeLW) Watch(options metav1.ListOptions) (watch.Interface, error) { return lw.watchResp, nil } diff --git a/pkg/quota/evaluator/core/BUILD b/pkg/quota/evaluator/core/BUILD index 84e618af9dd..79884affd14 100644 --- a/pkg/quota/evaluator/core/BUILD +++ b/pkg/quota/evaluator/core/BUILD @@ -33,6 +33,7 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/quota:go_default_library", "//pkg/quota/generic:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", 
"//vendor:k8s.io/apimachinery/pkg/runtime/schema", "//vendor:k8s.io/apimachinery/pkg/util/sets", diff --git a/pkg/quota/evaluator/core/configmap.go b/pkg/quota/evaluator/core/configmap.go index ecb9e952d27..edb2841c688 100644 --- a/pkg/quota/evaluator/core/configmap.go +++ b/pkg/quota/evaluator/core/configmap.go @@ -17,9 +17,9 @@ limitations under the License. package core import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" @@ -31,7 +31,7 @@ func NewConfigMapEvaluator(kubeClient clientset.Interface) quota.Evaluator { AllowCreateOnUpdate: false, InternalGroupKind: api.Kind("ConfigMap"), ResourceName: api.ResourceConfigMaps, - ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + ListFuncByNamespace: func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/persistent_volume_claims.go b/pkg/quota/evaluator/core/persistent_volume_claims.go index d26fa47e88b..12e399dfa2c 100644 --- a/pkg/quota/evaluator/core/persistent_volume_claims.go +++ b/pkg/quota/evaluator/core/persistent_volume_claims.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -64,7 +65,7 @@ func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.I // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. 
- return func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().PersistentVolumeClaims(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/evaluator/core/pods.go index 9d3475ab1f7..e2940982740 100644 --- a/pkg/quota/evaluator/core/pods.go +++ b/pkg/quota/evaluator/core/pods.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -52,7 +53,7 @@ func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic. // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. - return func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().Pods(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/replication_controllers.go b/pkg/quota/evaluator/core/replication_controllers.go index 29521e83fad..e13e5fe32ab 100644 --- a/pkg/quota/evaluator/core/replication_controllers.go +++ b/pkg/quota/evaluator/core/replication_controllers.go @@ -17,9 +17,9 @@ limitations under the License. 
package core import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" @@ -31,7 +31,7 @@ func NewReplicationControllerEvaluator(kubeClient clientset.Interface) quota.Eva AllowCreateOnUpdate: false, InternalGroupKind: api.Kind("ReplicationController"), ResourceName: api.ResourceReplicationControllers, - ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + ListFuncByNamespace: func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/resource_quotas.go b/pkg/quota/evaluator/core/resource_quotas.go index bc9133a9772..3eb9ba6c136 100644 --- a/pkg/quota/evaluator/core/resource_quotas.go +++ b/pkg/quota/evaluator/core/resource_quotas.go @@ -17,9 +17,9 @@ limitations under the License. 
package core import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" @@ -31,7 +31,7 @@ func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator { AllowCreateOnUpdate: false, InternalGroupKind: api.Kind("ResourceQuota"), ResourceName: api.ResourceQuotas, - ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + ListFuncByNamespace: func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/secrets.go b/pkg/quota/evaluator/core/secrets.go index 4b6022e8f22..56dd390726e 100644 --- a/pkg/quota/evaluator/core/secrets.go +++ b/pkg/quota/evaluator/core/secrets.go @@ -17,9 +17,9 @@ limitations under the License. 
package core import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" @@ -31,7 +31,7 @@ func NewSecretEvaluator(kubeClient clientset.Interface) quota.Evaluator { AllowCreateOnUpdate: false, InternalGroupKind: api.Kind("Secret"), ResourceName: api.ResourceSecrets, - ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + ListFuncByNamespace: func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().Secrets(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/evaluator/core/services.go b/pkg/quota/evaluator/core/services.go index 0fccb36ad88..2a4aad927eb 100644 --- a/pkg/quota/evaluator/core/services.go +++ b/pkg/quota/evaluator/core/services.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -42,7 +43,7 @@ var serviceResources = []api.ResourceName{ // NewServiceEvaluator returns an evaluator that can evaluate service quotas func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator { return &serviceEvaluator{ - listFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + listFuncByNamespace: func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { itemList, err := kubeClient.Core().Services(namespace).List(options) if err != nil { return nil, err diff --git a/pkg/quota/generic/BUILD b/pkg/quota/generic/BUILD index 7fbdf2bf381..f52ac7b34c7 100644 --- a/pkg/quota/generic/BUILD +++ b/pkg/quota/generic/BUILD @@ -17,9 +17,9 @@ go_library( deps = [ "//pkg/api:go_default_library", 
"//pkg/api/resource:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/controller/informers:go_default_library", "//pkg/quota:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", diff --git a/pkg/quota/generic/evaluator.go b/pkg/quota/generic/evaluator.go index d7fc9e01b0a..6903e369071 100644 --- a/pkg/quota/generic/evaluator.go +++ b/pkg/quota/generic/evaluator.go @@ -19,20 +19,20 @@ package generic import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/quota" ) // ListResourceUsingInformerFunc returns a listing function based on the shared informer factory for the specified resource. 
func ListResourceUsingInformerFunc(f informers.SharedInformerFactory, groupResource schema.GroupResource) ListFuncByNamespace { - return func(namespace string, options v1.ListOptions) ([]runtime.Object, error) { + return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { labelSelector, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err @@ -46,7 +46,7 @@ func ListResourceUsingInformerFunc(f informers.SharedInformerFactory, groupResou } // ListFuncByNamespace knows how to list resources in a namespace -type ListFuncByNamespace func(namespace string, options v1.ListOptions) ([]runtime.Object, error) +type ListFuncByNamespace func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) // MatchesScopeFunc knows how to evaluate if an object matches a scope type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) @@ -91,7 +91,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, for _, resourceName := range options.Resources { result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI} } - items, err := listFunc(options.Namespace, v1.ListOptions{ + items, err := listFunc(options.Namespace, metav1.ListOptions{ LabelSelector: labels.Everything().String(), }) if err != nil { diff --git a/pkg/registry/apps/petset/storage/storage_test.go b/pkg/registry/apps/petset/storage/storage_test.go index 0b07bd1efe6..cbeace275c2 100644 --- a/pkg/registry/apps/petset/storage/storage_test.go +++ b/pkg/registry/apps/petset/storage/storage_test.go @@ -54,7 +54,7 @@ func validNewStatefulSet() *apps.StatefulSet { return &apps.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{"a": "b"}, }, Spec: apps.StatefulSetSpec{ @@ -101,8 +101,8 @@ func TestStatusUpdate(t *testing.T) { storage, statusStorage, server := newStorage(t) defer server.Terminate(t) defer 
storage.Store.DestroyFunc() - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) - key := "/statefulsets/" + api.NamespaceDefault + "/foo" + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/statefulsets/" + metav1.NamespaceDefault + "/foo" validStatefulSet := validNewStatefulSet() if err := storage.Storage.Create(ctx, key, validStatefulSet, nil, 0); err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/pkg/registry/apps/petset/strategy_test.go b/pkg/registry/apps/petset/strategy_test.go index f5d7049e107..1690aa8ce96 100644 --- a/pkg/registry/apps/petset/strategy_test.go +++ b/pkg/registry/apps/petset/strategy_test.go @@ -48,7 +48,7 @@ func TestStatefulSetStrategy(t *testing.T) { }, } ps := &apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, @@ -110,7 +110,7 @@ func TestStatefulSetStatusStrategy(t *testing.T) { }, } oldPS := &apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "10"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "10"}, Spec: apps.StatefulSetSpec{ Replicas: 3, Selector: &metav1.LabelSelector{MatchLabels: validSelector}, @@ -121,7 +121,7 @@ func TestStatefulSetStatusStrategy(t *testing.T) { }, } newPS := &apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "9"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "9"}, Spec: apps.StatefulSetSpec{ Replicas: 1, Selector: &metav1.LabelSelector{MatchLabels: validSelector}, diff --git 
a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage_test.go b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage_test.go index 0195fbb3e8a..bc07f0d48ae 100644 --- a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage_test.go +++ b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage_test.go @@ -20,7 +20,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" // Ensure that autoscaling/v1 package is initialized. "k8s.io/apimachinery/pkg/fields" @@ -49,7 +48,7 @@ func validNewHorizontalPodAutoscaler(name string) *autoscaling.HorizontalPodAuto return &autoscaling.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: autoscaling.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscaling.CrossVersionObjectReference{ diff --git a/pkg/registry/batch/cronjob/storage/storage_test.go b/pkg/registry/batch/cronjob/storage/storage_test.go index 939efa03560..58e77dc771a 100644 --- a/pkg/registry/batch/cronjob/storage/storage_test.go +++ b/pkg/registry/batch/cronjob/storage/storage_test.go @@ -44,7 +44,7 @@ func validNewCronJob() *batch.CronJob { return &batch.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: batch.CronJobSpec{ Schedule: "* * * * ?", diff --git a/pkg/registry/batch/cronjob/strategy_test.go b/pkg/registry/batch/cronjob/strategy_test.go index 244bcf7aa0b..13733d2bc80 100644 --- a/pkg/registry/batch/cronjob/strategy_test.go +++ b/pkg/registry/batch/cronjob/strategy_test.go @@ -51,7 +51,7 @@ func TestCronJobStrategy(t *testing.T) { scheduledJob := &batch.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: batch.CronJobSpec{ Schedule: "* * * * ?", @@ -113,7 +113,7 @@ 
func TestCronJobStatusStrategy(t *testing.T) { oldCronJob := &batch.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Spec: batch.CronJobSpec{ @@ -130,7 +130,7 @@ func TestCronJobStatusStrategy(t *testing.T) { newCronJob := &batch.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "9", }, Spec: batch.CronJobSpec{ diff --git a/pkg/registry/batch/job/strategy_test.go b/pkg/registry/batch/job/strategy_test.go index 7fd7511e243..590afa0e98f 100644 --- a/pkg/registry/batch/job/strategy_test.go +++ b/pkg/registry/batch/job/strategy_test.go @@ -60,7 +60,7 @@ func TestJobStrategy(t *testing.T) { job := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: batch.JobSpec{ Selector: validSelector, @@ -117,7 +117,7 @@ func TestJobStrategyWithGeneration(t *testing.T) { job := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "myjob2", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: theUID, }, Spec: batch.JobSpec{ @@ -178,7 +178,7 @@ func TestJobStatusStrategy(t *testing.T) { oldJob := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "10", }, Spec: batch.JobSpec{ @@ -193,7 +193,7 @@ func TestJobStatusStrategy(t *testing.T) { newJob := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "myjob", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "9", }, Spec: batch.JobSpec{ diff --git a/pkg/registry/certificates/certificates/BUILD b/pkg/registry/certificates/certificates/BUILD index 30c7d2061f7..d146dd3c580 100644 --- a/pkg/registry/certificates/certificates/BUILD +++ b/pkg/registry/certificates/certificates/BUILD @@ -23,6 +23,7 @@ 
go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/componentstatus/BUILD b/pkg/registry/core/componentstatus/BUILD index bb1f391452e..82715dd92a5 100644 --- a/pkg/registry/core/componentstatus/BUILD +++ b/pkg/registry/core/componentstatus/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/api:go_default_library", "//pkg/probe:go_default_library", "//pkg/probe/http:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/net", diff --git a/pkg/registry/core/configmap/BUILD b/pkg/registry/core/configmap/BUILD index 515432bd335..62ff3f1daaf 100644 --- a/pkg/registry/core/configmap/BUILD +++ b/pkg/registry/core/configmap/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/configmap/strategy_test.go b/pkg/registry/core/configmap/strategy_test.go index 03ff8ae1817..c001f59c420 100644 --- a/pkg/registry/core/configmap/strategy_test.go +++ b/pkg/registry/core/configmap/strategy_test.go @@ -37,7 +37,7 @@ func TestConfigMapStrategy(t *testing.T) { cfg := &api.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "valid-config-data", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Data: 
map[string]string{ "foo": "bar", @@ -54,7 +54,7 @@ func TestConfigMapStrategy(t *testing.T) { newCfg := &api.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "valid-config-data-2", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, ResourceVersion: "4", }, Data: map[string]string{ diff --git a/pkg/registry/core/controller/BUILD b/pkg/registry/core/controller/BUILD index abb18599a12..982688a326a 100644 --- a/pkg/registry/core/controller/BUILD +++ b/pkg/registry/core/controller/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/controller/storage/storage_test.go b/pkg/registry/core/controller/storage/storage_test.go index f23934b12e5..35bf53b2132 100644 --- a/pkg/registry/core/controller/storage/storage_test.go +++ b/pkg/registry/core/controller/storage/storage_test.go @@ -35,7 +35,7 @@ import ( ) const ( - namespace = api.NamespaceDefault + namespace = metav1.NamespaceDefault name = "foo" ) diff --git a/pkg/registry/core/controller/strategy_test.go b/pkg/registry/core/controller/strategy_test.go index 51368005e68..cdad6e9b795 100644 --- a/pkg/registry/core/controller/strategy_test.go +++ b/pkg/registry/core/controller/strategy_test.go @@ -49,7 +49,7 @@ func TestControllerStrategy(t *testing.T) { }, } rc := &api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: api.ReplicationControllerSpec{ Selector: validSelector, Template: &validPodTemplate.Template, @@ -107,7 +107,7 @@ func TestControllerStatusStrategy(t *testing.T) { }, } oldController := 
&api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "10"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "10"}, Spec: api.ReplicationControllerSpec{ Replicas: 3, Selector: validSelector, @@ -119,7 +119,7 @@ func TestControllerStatusStrategy(t *testing.T) { }, } newController := &api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "9"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "9"}, Spec: api.ReplicationControllerSpec{ Replicas: 1, Selector: validSelector, diff --git a/pkg/registry/core/endpoint/BUILD b/pkg/registry/core/endpoint/BUILD index 537cfdf7f91..bf42593f47c 100644 --- a/pkg/registry/core/endpoint/BUILD +++ b/pkg/registry/core/endpoint/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/endpoint/storage/storage_test.go b/pkg/registry/core/endpoint/storage/storage_test.go index 903f6ae9e0e..1b89cd052d8 100644 --- a/pkg/registry/core/endpoint/storage/storage_test.go +++ b/pkg/registry/core/endpoint/storage/storage_test.go @@ -44,7 +44,7 @@ func validNewEndpoints() *api.Endpoints { return &api.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, diff --git a/pkg/registry/core/limitrange/storage/storage_test.go b/pkg/registry/core/limitrange/storage/storage_test.go index 6cd65fad0d5..0504dbb3210 
100644 --- a/pkg/registry/core/limitrange/storage/storage_test.go +++ b/pkg/registry/core/limitrange/storage/storage_test.go @@ -45,7 +45,7 @@ func validNewLimitRange() *api.LimitRange { return &api.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.LimitRangeSpec{ Limits: []api.LimitRangeItem{ diff --git a/pkg/registry/core/namespace/BUILD b/pkg/registry/core/namespace/BUILD index 2d5c85a8324..32684371699 100644 --- a/pkg/registry/core/namespace/BUILD +++ b/pkg/registry/core/namespace/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/node/BUILD b/pkg/registry/core/node/BUILD index 0217e25672e..1e45b54501c 100644 --- a/pkg/registry/core/node/BUILD +++ b/pkg/registry/core/node/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/kubelet/client:go_default_library", "//pkg/storage:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/persistentvolumeclaim/storage/storage_test.go b/pkg/registry/core/persistentvolumeclaim/storage/storage_test.go index fdf168b7c88..d041283431c 100644 --- a/pkg/registry/core/persistentvolumeclaim/storage/storage_test.go +++ b/pkg/registry/core/persistentvolumeclaim/storage/storage_test.go @@ -71,7 +71,7 @@ func TestCreate(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - pv := 
validNewPersistentVolumeClaim("foo", api.NamespaceDefault) + pv := validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault) pv.ObjectMeta = metav1.ObjectMeta{} test.TestCreate( // valid @@ -90,7 +90,7 @@ func TestUpdate(t *testing.T) { test := registrytest.New(t, storage.Store) test.TestUpdate( // valid - validNewPersistentVolumeClaim("foo", api.NamespaceDefault), + validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault), // updateFunc func(obj runtime.Object) runtime.Object { object := obj.(*api.PersistentVolumeClaim) @@ -105,7 +105,7 @@ func TestDelete(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store).ReturnDeletedObject() - test.TestDelete(validNewPersistentVolumeClaim("foo", api.NamespaceDefault)) + test.TestDelete(validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault)) } func TestGet(t *testing.T) { @@ -113,7 +113,7 @@ func TestGet(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - test.TestGet(validNewPersistentVolumeClaim("foo", api.NamespaceDefault)) + test.TestGet(validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault)) } func TestList(t *testing.T) { @@ -121,7 +121,7 @@ func TestList(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - test.TestList(validNewPersistentVolumeClaim("foo", api.NamespaceDefault)) + test.TestList(validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault)) } func TestWatch(t *testing.T) { @@ -130,7 +130,7 @@ func TestWatch(t *testing.T) { defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) test.TestWatch( - validNewPersistentVolumeClaim("foo", api.NamespaceDefault), + validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault), // matching labels []labels.Set{}, // not matching labels @@ -156,13 +156,13 @@ func TestUpdateStatus(t *testing.T) { ctx := 
genericapirequest.NewDefaultContext() key, _ := storage.KeyFunc(ctx, "foo") - pvcStart := validNewPersistentVolumeClaim("foo", api.NamespaceDefault) + pvcStart := validNewPersistentVolumeClaim("foo", metav1.NamespaceDefault) err := storage.Storage.Create(ctx, key, pvcStart, nil, 0) pvc := &api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.PersistentVolumeClaimSpec{ AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, diff --git a/pkg/registry/core/pod/storage/storage_test.go b/pkg/registry/core/pod/storage/storage_test.go index 9449797d9ed..b3ed42f2282 100644 --- a/pkg/registry/core/pod/storage/storage_test.go +++ b/pkg/registry/core/pod/storage/storage_test.go @@ -55,7 +55,7 @@ func validNewPod() *api.Pod { return &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, @@ -168,7 +168,7 @@ func newFailDeleteStorage(t *testing.T, called *bool) (*REST, *etcdtesting.EtcdT func TestIgnoreDeleteNotFound(t *testing.T) { pod := validNewPod() - testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) + testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) called := false registry, server := newFailDeleteStorage(t, &called) defer server.Terminate(t) @@ -407,7 +407,7 @@ func TestEtcdCreate(t *testing.T) { // Suddenly, a wild scheduler appears: _, err = bindingStorage.Create(ctx, &api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine"}, }) if err != nil { @@ -433,7 +433,7 @@ func TestEtcdCreateBindingNoPod(t *testing.T) { // - Schedule (scheduler) // - Delete (apiserver) _, 
err := bindingStorage.Create(ctx, &api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine"}, }) if err == nil { @@ -478,7 +478,7 @@ func TestEtcdCreateWithContainersNotFound(t *testing.T) { // Suddenly, a wild scheduler appears: _, err = bindingStorage.Create(ctx, &api.Binding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Annotations: map[string]string{"label1": "value1"}, }, @@ -513,7 +513,7 @@ func TestEtcdCreateWithConflict(t *testing.T) { // Suddenly, a wild scheduler appears: binding := api.Binding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", Annotations: map[string]string{"label1": "value1"}, }, @@ -542,7 +542,7 @@ func TestEtcdCreateWithExistingContainers(t *testing.T) { // Suddenly, a wild scheduler appears: _, err = bindingStorage.Create(ctx, &api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine"}, }) if err != nil { @@ -564,28 +564,28 @@ func TestEtcdCreateBinding(t *testing.T) { }{ "noName": { binding: api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{}, }, errOK: func(err error) bool { return err != nil }, }, "badKind": { binding: api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine1", Kind: "unknown"}, }, errOK: func(err error) bool { return err != nil }, }, "emptyKind": { 
binding: api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine2"}, }, errOK: func(err error) bool { return err == nil }, }, "kindNode": { binding: api.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine3", Kind: "Node"}, }, errOK: func(err error) bool { return err == nil }, @@ -649,7 +649,7 @@ func TestEtcdUpdateScheduled(t *testing.T) { err := storage.Storage.Create(ctx, key, &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.PodSpec{ NodeName: "machine", @@ -721,7 +721,7 @@ func TestEtcdUpdateStatus(t *testing.T) { podStart := api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.PodSpec{ NodeName: "machine", diff --git a/pkg/registry/core/podtemplate/storage/storage_test.go b/pkg/registry/core/podtemplate/storage/storage_test.go index 5116399c326..662f0b11bf5 100644 --- a/pkg/registry/core/podtemplate/storage/storage_test.go +++ b/pkg/registry/core/podtemplate/storage/storage_test.go @@ -44,7 +44,7 @@ func validNewPodTemplate(name string) *api.PodTemplate { return &api.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/registry/core/resourcequota/storage/storage_test.go b/pkg/registry/core/resourcequota/storage/storage_test.go index 9cf8ab2c9d0..9a2cb6af33f 100644 --- a/pkg/registry/core/resourcequota/storage/storage_test.go +++ b/pkg/registry/core/resourcequota/storage/storage_test.go @@ -48,7 +48,7 @@ func 
validNewResourceQuota() *api.ResourceQuota { return &api.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.ResourceQuotaSpec{ Hard: api.ResourceList{ @@ -168,7 +168,7 @@ func TestUpdateStatus(t *testing.T) { resourcequotaIn := &api.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Status: api.ResourceQuotaStatus{ Used: api.ResourceList{ diff --git a/pkg/registry/core/secret/BUILD b/pkg/registry/core/secret/BUILD index 7f563831b38..f5e983efd82 100644 --- a/pkg/registry/core/secret/BUILD +++ b/pkg/registry/core/secret/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/secret/storage/storage_test.go b/pkg/registry/core/secret/storage/storage_test.go index 3bc1241ed1b..ea0267715ff 100644 --- a/pkg/registry/core/secret/storage/storage_test.go +++ b/pkg/registry/core/secret/storage/storage_test.go @@ -44,7 +44,7 @@ func validNewSecret(name string) *api.Secret { return &api.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Data: map[string][]byte{ "test": []byte("data"), diff --git a/pkg/registry/core/service/BUILD b/pkg/registry/core/service/BUILD index 79c37476de7..6d0621b120c 100644 --- a/pkg/registry/core/service/BUILD +++ b/pkg/registry/core/service/BUILD @@ -33,6 +33,7 @@ go_library( "//pkg/util/config:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + 
"//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/service/ipallocator/controller/BUILD b/pkg/registry/core/service/ipallocator/controller/BUILD index 54474aa9889..9cc7e4dc4fb 100644 --- a/pkg/registry/core/service/ipallocator/controller/BUILD +++ b/pkg/registry/core/service/ipallocator/controller/BUILD @@ -19,6 +19,7 @@ go_library( "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", ], diff --git a/pkg/registry/core/service/portallocator/controller/BUILD b/pkg/registry/core/service/portallocator/controller/BUILD index 028bb5f0daa..046419ae718 100644 --- a/pkg/registry/core/service/portallocator/controller/BUILD +++ b/pkg/registry/core/service/portallocator/controller/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/registry/core/service:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/util/net", "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index c8c0b8ae7cc..1ce77d47c65 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/net" 
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -107,7 +108,7 @@ func (c *Repair) runOnce() error { // the service collection. The caching layer keeps per-collection RVs, // and this is proper, since in theory the collections could be hosted // in separate etcd (or even non-etcd) instances. - list, err := c.serviceClient.Services(api.NamespaceAll).List(api.ListOptions{}) + list, err := c.serviceClient.Services(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("unable to refresh the port block: %v", err) } diff --git a/pkg/registry/core/service/rest_test.go b/pkg/registry/core/service/rest_test.go index fabed35d24e..580b2b28f9a 100644 --- a/pkg/registry/core/service/rest_test.go +++ b/pkg/registry/core/service/rest_test.go @@ -293,7 +293,7 @@ func TestServiceRegistryUpdate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() storage, registry := NewTestREST(t, nil) svc, err := registry.CreateService(ctx, &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", ResourceVersion: "1", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "foo", ResourceVersion: "1", Namespace: metav1.NamespaceDefault}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz1"}, Ports: []api.ServicePort{{ @@ -560,7 +560,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, @@ -570,7 +570,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{}, @@ -678,13 +678,13 @@ func TestServiceRegistryList(t *testing.T) { ctx := genericapirequest.NewDefaultContext() storage, registry := NewTestREST(t, 
nil) registry.CreateService(ctx, &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: metav1.NamespaceDefault}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, }, }) registry.CreateService(ctx, &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "foo2", Namespace: metav1.NamespaceDefault}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar2": "baz2"}, }, diff --git a/pkg/registry/core/service/storage/storage_test.go b/pkg/registry/core/service/storage/storage_test.go index 6a0d16dde06..3fc6311c379 100644 --- a/pkg/registry/core/service/storage/storage_test.go +++ b/pkg/registry/core/service/storage/storage_test.go @@ -46,7 +46,7 @@ func validService() *api.Service { return &api.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, diff --git a/pkg/registry/core/serviceaccount/BUILD b/pkg/registry/core/serviceaccount/BUILD index 4cb26f78483..703b4e8ab9a 100644 --- a/pkg/registry/core/serviceaccount/BUILD +++ b/pkg/registry/core/serviceaccount/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/core/serviceaccount/storage/storage_test.go b/pkg/registry/core/serviceaccount/storage/storage_test.go index 76338e59b47..87131b6abcb 100644 --- a/pkg/registry/core/serviceaccount/storage/storage_test.go +++ b/pkg/registry/core/serviceaccount/storage/storage_test.go @@ -44,7 +44,7 @@ 
func validNewServiceAccount(name string) *api.ServiceAccount { return &api.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Secrets: []api.ObjectReference{}, } diff --git a/pkg/registry/extensions/daemonset/storage/storage_test.go b/pkg/registry/extensions/daemonset/storage/storage_test.go index 0d2de6aad04..fe041a9d0e6 100644 --- a/pkg/registry/extensions/daemonset/storage/storage_test.go +++ b/pkg/registry/extensions/daemonset/storage/storage_test.go @@ -46,7 +46,7 @@ func newValidDaemonSet() *extensions.DaemonSet { return &extensions.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, diff --git a/pkg/registry/extensions/deployment/BUILD b/pkg/registry/extensions/deployment/BUILD index f93a6f68122..40c78b71c66 100644 --- a/pkg/registry/extensions/deployment/BUILD +++ b/pkg/registry/extensions/deployment/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/extensions/ingress/storage/storage_test.go b/pkg/registry/extensions/ingress/storage/storage_test.go index 8eb717d0b5c..d2016559d29 100644 --- a/pkg/registry/extensions/ingress/storage/storage_test.go +++ b/pkg/registry/extensions/ingress/storage/storage_test.go @@ -44,7 +44,7 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) } var ( - namespace = api.NamespaceNone + namespace = metav1.NamespaceNone name = "foo-ingress" defaultHostname = "foo.bar.com" 
defaultBackendName = "default-backend" diff --git a/pkg/registry/extensions/ingress/strategy_test.go b/pkg/registry/extensions/ingress/strategy_test.go index 42523f8c3bf..f7fe25791ba 100644 --- a/pkg/registry/extensions/ingress/strategy_test.go +++ b/pkg/registry/extensions/ingress/strategy_test.go @@ -36,7 +36,7 @@ func newIngress() extensions.Ingress { return extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.IngressSpec{ Backend: &extensions.IngressBackend{ diff --git a/pkg/registry/extensions/networkpolicy/storage/storage_test.go b/pkg/registry/extensions/networkpolicy/storage/storage_test.go index 6b606a34823..f6314d32fc5 100644 --- a/pkg/registry/extensions/networkpolicy/storage/storage_test.go +++ b/pkg/registry/extensions/networkpolicy/storage/storage_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/genericapiserver/registry/generic" "k8s.io/kubernetes/pkg/registry/registrytest" @@ -59,7 +58,7 @@ func validNewNetworkPolicy() *extensions.NetworkPolicy { return &extensions.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{"a": "b"}, }, Spec: extensions.NetworkPolicySpec{ diff --git a/pkg/registry/extensions/networkpolicy/strategy_test.go b/pkg/registry/extensions/networkpolicy/strategy_test.go index 61e6735429f..b609acb7e29 100644 --- a/pkg/registry/extensions/networkpolicy/strategy_test.go +++ b/pkg/registry/extensions/networkpolicy/strategy_test.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" 
) @@ -36,7 +35,7 @@ func TestNetworkPolicyStrategy(t *testing.T) { validMatchLabels := map[string]string{"a": "b"} np := &extensions.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{MatchLabels: validMatchLabels}, Ingress: []extensions.NetworkPolicyIngressRule{}, diff --git a/pkg/registry/extensions/replicaset/BUILD b/pkg/registry/extensions/replicaset/BUILD index 141691402c7..2c130d43e8c 100644 --- a/pkg/registry/extensions/replicaset/BUILD +++ b/pkg/registry/extensions/replicaset/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/genericapiserver/registry/generic:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/extensions/replicaset/storage/storage_test.go b/pkg/registry/extensions/replicaset/storage/storage_test.go index 3b49b567040..7a5c9e3a62d 100644 --- a/pkg/registry/extensions/replicaset/storage/storage_test.go +++ b/pkg/registry/extensions/replicaset/storage/storage_test.go @@ -58,7 +58,7 @@ func validNewReplicaSet() *extensions.ReplicaSet { return &extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, @@ -254,8 +254,8 @@ func TestScaleGet(t *testing.T) { name := "foo" var rs extensions.ReplicaSet - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) - key := "/replicasets/" + api.NamespaceDefault + "/" + name + ctx := 
genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/replicasets/" + metav1.NamespaceDefault + "/" + name if err := storage.ReplicaSet.Storage.Create(ctx, key, &validReplicaSet, &rs, 0); err != nil { t.Fatalf("error setting new replica set (key: %s) %v: %v", key, validReplicaSet, err) } @@ -263,7 +263,7 @@ func TestScaleGet(t *testing.T) { want := &extensions.Scale{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, UID: rs.UID, ResourceVersion: rs.ResourceVersion, CreationTimestamp: rs.CreationTimestamp, @@ -294,8 +294,8 @@ func TestScaleUpdate(t *testing.T) { name := "foo" var rs extensions.ReplicaSet - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) - key := "/replicasets/" + api.NamespaceDefault + "/" + name + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/replicasets/" + metav1.NamespaceDefault + "/" + name if err := storage.ReplicaSet.Storage.Create(ctx, key, &validReplicaSet, &rs, 0); err != nil { t.Fatalf("error setting new replica set (key: %s) %v: %v", key, validReplicaSet, err) } @@ -303,7 +303,7 @@ func TestScaleUpdate(t *testing.T) { update := extensions.Scale{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: int32(replicas), @@ -336,8 +336,8 @@ func TestStatusUpdate(t *testing.T) { defer server.Terminate(t) defer storage.ReplicaSet.Store.DestroyFunc() - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) - key := "/replicasets/" + api.NamespaceDefault + "/foo" + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/replicasets/" + metav1.NamespaceDefault + "/foo" if err := storage.ReplicaSet.Storage.Create(ctx, key, &validReplicaSet, nil, 
0); err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/registry/extensions/replicaset/strategy_test.go b/pkg/registry/extensions/replicaset/strategy_test.go index fa611df088e..8e9ea6ec40e 100644 --- a/pkg/registry/extensions/replicaset/strategy_test.go +++ b/pkg/registry/extensions/replicaset/strategy_test.go @@ -48,7 +48,7 @@ func TestReplicaSetStrategy(t *testing.T) { }, } rs := &extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: extensions.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, @@ -106,7 +106,7 @@ func TestReplicaSetStatusStrategy(t *testing.T) { }, } oldRS := &extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "10"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "10"}, Spec: extensions.ReplicaSetSpec{ Replicas: 3, Selector: &metav1.LabelSelector{MatchLabels: validSelector}, @@ -118,7 +118,7 @@ func TestReplicaSetStatusStrategy(t *testing.T) { }, } newRS := &extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "9"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "9"}, Spec: extensions.ReplicaSetSpec{ Replicas: 1, Selector: &metav1.LabelSelector{MatchLabels: validSelector}, diff --git a/pkg/registry/extensions/rest/BUILD b/pkg/registry/extensions/rest/BUILD index e65993ec61e..bba146e5ab1 100644 --- a/pkg/registry/extensions/rest/BUILD +++ b/pkg/registry/extensions/rest/BUILD @@ -16,7 +16,6 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", 
"//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", @@ -35,6 +34,7 @@ go_library( "//pkg/registry/extensions/thirdpartyresource/storage:go_default_library", "//pkg/registry/extensions/thirdpartyresourcedata:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/sets", diff --git a/pkg/registry/extensions/rest/thirdparty_controller.go b/pkg/registry/extensions/rest/thirdparty_controller.go index 6ac6201469a..2db39b2376c 100644 --- a/pkg/registry/extensions/rest/thirdparty_controller.go +++ b/pkg/registry/extensions/rest/thirdparty_controller.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" @@ -68,7 +68,7 @@ func (t *ThirdPartyController) SyncOneResource(rsrc *extensions.ThirdPartyResour // Synchronize all resources with RESTful resources on the master func (t *ThirdPartyController) SyncResources() error { - list, err := t.client.ThirdPartyResources().List(api.ListOptions{}) + list, err := t.client.ThirdPartyResources().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/registry/extensions/thirdpartyresourcedata/BUILD b/pkg/registry/extensions/thirdpartyresourcedata/BUILD index 8530d491184..21f8e452370 100644 --- a/pkg/registry/extensions/thirdpartyresourcedata/BUILD +++ b/pkg/registry/extensions/thirdpartyresourcedata/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", 
"//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", @@ -54,7 +55,6 @@ go_test( "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", diff --git a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go b/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go index 38e09e3d58d..21a88818e80 100644 --- a/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go +++ b/pkg/registry/extensions/thirdpartyresourcedata/storage/storage_test.go @@ -20,7 +20,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" // Ensure that extensions/v1beta1 package is initialized. 
"k8s.io/apimachinery/pkg/fields" @@ -42,7 +41,7 @@ func validNewThirdPartyResourceData(name string) *extensions.ThirdPartyResourceD return &extensions.ThirdPartyResourceData{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Data: []byte("foobarbaz"), } diff --git a/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go b/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go index 5baf430fa1b..16801ffb33a 100644 --- a/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go +++ b/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go @@ -54,7 +54,7 @@ func validNewPodDisruptionBudget() *policy.PodDisruptionBudget { return &policy.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Labels: map[string]string{"a": "b"}, }, Spec: policy.PodDisruptionBudgetSpec{ @@ -85,8 +85,8 @@ func TestStatusUpdate(t *testing.T) { storage, statusStorage, server := newStorage(t) defer server.Terminate(t) defer storage.Store.DestroyFunc() - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), api.NamespaceDefault) - key := "/poddisruptionbudgets/" + api.NamespaceDefault + "/foo" + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/poddisruptionbudgets/" + metav1.NamespaceDefault + "/foo" validPodDisruptionBudget := validNewPodDisruptionBudget() if err := storage.Storage.Create(ctx, key, validPodDisruptionBudget, nil, 0); err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/pkg/registry/policy/poddisruptionbudget/strategy_test.go b/pkg/registry/policy/poddisruptionbudget/strategy_test.go index e65a4973748..f535eab2dd8 100644 --- a/pkg/registry/policy/poddisruptionbudget/strategy_test.go +++ b/pkg/registry/policy/poddisruptionbudget/strategy_test.go @@ -21,7 +21,6 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/util/intstr" ) @@ -37,7 +36,7 @@ func TestPodDisruptionBudgetStrategy(t *testing.T) { validSelector := map[string]string{"a": "b"} pdb := &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: policy.PodDisruptionBudgetSpec{ MinAvailable: intstr.FromInt(3), Selector: &metav1.LabelSelector{MatchLabels: validSelector}, @@ -96,7 +95,7 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) { } validSelector := map[string]string{"a": "b"} oldPdb := &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "10"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "10"}, Spec: policy.PodDisruptionBudgetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, MinAvailable: intstr.FromInt(3), @@ -109,7 +108,7 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) { }, } newPdb := &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "9"}, + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault, ResourceVersion: "9"}, Spec: policy.PodDisruptionBudgetSpec{ Selector: &metav1.LabelSelector{MatchLabels: validSelector}, MinAvailable: intstr.FromInt(2), diff --git a/pkg/registry/rbac/clusterrole/BUILD b/pkg/registry/rbac/clusterrole/BUILD index 126e4ff26a0..8fbda7df041 100644 --- a/pkg/registry/rbac/clusterrole/BUILD +++ b/pkg/registry/rbac/clusterrole/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/apis/rbac/validation:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + 
"//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/rbac/clusterrolebinding/BUILD b/pkg/registry/rbac/clusterrolebinding/BUILD index 87cbf99126c..0b4475f8f88 100644 --- a/pkg/registry/rbac/clusterrolebinding/BUILD +++ b/pkg/registry/rbac/clusterrolebinding/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/apis/rbac/validation:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/rbac/rest/BUILD b/pkg/registry/rbac/rest/BUILD index 92bfa4bfbf2..5361dc4b6ab 100644 --- a/pkg/registry/rbac/rest/BUILD +++ b/pkg/registry/rbac/rest/BUILD @@ -12,7 +12,6 @@ go_library( srcs = ["storage_rbac.go"], tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/v1alpha1:go_default_library", "//pkg/apis/rbac/v1beta1:go_default_library", @@ -35,6 +34,7 @@ go_library( "//pkg/registry/rbac/validation:go_default_library", "//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", diff --git a/pkg/registry/rbac/role/BUILD b/pkg/registry/rbac/role/BUILD index 3f5a4eea20d..cca088b3432 100644 --- a/pkg/registry/rbac/role/BUILD +++ b/pkg/registry/rbac/role/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/apis/rbac/validation:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + 
"//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/rbac/rolebinding/BUILD b/pkg/registry/rbac/rolebinding/BUILD index 2ef7bf27fb5..bf4407edd66 100644 --- a/pkg/registry/rbac/rolebinding/BUILD +++ b/pkg/registry/rbac/rolebinding/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/apis/rbac/validation:go_default_library", "//pkg/genericapiserver/registry/rest:go_default_library", "//pkg/storage:go_default_library", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/registrytest/BUILD b/pkg/registry/registrytest/BUILD index 6544f8e03f0..d52cd59ec7c 100644 --- a/pkg/registry/registrytest/BUILD +++ b/pkg/registry/registrytest/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/storage/testing:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/internalversion", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/fields", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/registry/registrytest/service.go b/pkg/registry/registrytest/service.go index d408c660723..af33e183a3b 100644 --- a/pkg/registry/registrytest/service.go +++ b/pkg/registry/registrytest/service.go @@ -59,7 +59,7 @@ func (r *ServiceRegistry) ListServices(ctx genericapirequest.Context, options *m res.TypeMeta = r.List.TypeMeta res.ListMeta = r.List.ListMeta - if ns != api.NamespaceAll { + if ns != metav1.NamespaceAll { for _, service := range r.List.Items { if ns == service.Namespace { res.Items = append(res.Items, service) diff --git a/pkg/storage/BUILD b/pkg/storage/BUILD index 2762b5555db..4ff44102c71 100644 --- a/pkg/storage/BUILD +++ 
b/pkg/storage/BUILD @@ -23,7 +23,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/api/validation/path:go_default_library", "//pkg/client/cache:go_default_library", "//pkg/util:go_default_library", diff --git a/pkg/storage/cacher.go b/pkg/storage/cacher.go index 55e1876bb31..a73a4cf91c3 100644 --- a/pkg/storage/cacher.go +++ b/pkg/storage/cacher.go @@ -35,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/util" @@ -692,7 +691,7 @@ func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFun } // Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) List(options v1.ListOptions) (runtime.Object, error) { +func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { list := lw.newListFunc() if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil { return nil, err @@ -701,7 +700,7 @@ func (lw *cacherListerWatcher) List(options v1.ListOptions) (runtime.Object, err } // Implements cache.ListerWatcher interface. 
-func (lw *cacherListerWatcher) Watch(options v1.ListOptions) (watch.Interface, error) { +func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything) } diff --git a/pkg/storage/watch_cache_test.go b/pkg/storage/watch_cache_test.go index b253030ca4f..26439a3674b 100644 --- a/pkg/storage/watch_cache_test.go +++ b/pkg/storage/watch_cache_test.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/pkg/util/clock" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" ) @@ -318,14 +317,14 @@ func TestWaitUntilFreshAndListTimeout(t *testing.T) { } type testLW struct { - ListFunc func(options v1.ListOptions) (runtime.Object, error) - WatchFunc func(options v1.ListOptions) (watch.Interface, error) + ListFunc func(options metav1.ListOptions) (runtime.Object, error) + WatchFunc func(options metav1.ListOptions) (watch.Interface, error) } -func (t *testLW) List(options v1.ListOptions) (runtime.Object, error) { +func (t *testLW) List(options metav1.ListOptions) (runtime.Object, error) { return t.ListFunc(options) } -func (t *testLW) Watch(options v1.ListOptions) (watch.Interface, error) { +func (t *testLW) Watch(options metav1.ListOptions) (watch.Interface, error) { return t.WatchFunc(options) } @@ -343,12 +342,12 @@ func TestReflectorForWatchCache(t *testing.T) { } lw := &testLW{ - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { fw := watch.NewFake() go fw.Stop() return fw, nil }, - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return &api.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}}, nil }, } diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index 574419612cf..f63b8993203 100644 
--- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -535,7 +535,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pv-recycler-", - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Spec: v1.PodSpec{ ActiveDeadlineSeconds: &timeout, diff --git a/pkg/volume/util_test.go b/pkg/volume/util_test.go index c9b3b42538c..04d8eeba558 100644 --- a/pkg/volume/util_test.go +++ b/pkg/volume/util_test.go @@ -58,7 +58,7 @@ func newEvent(eventtype, message string) watch.Event { Type: watch.Added, Object: &v1.Event{ ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, Reason: "MockEvent", Message: message, @@ -70,7 +70,7 @@ func newEvent(eventtype, message string) watch.Event { func newPod(name string, phase v1.PodPhase, message string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: name, }, Status: v1.PodStatus{ diff --git a/plugin/pkg/admission/gc/gc_admission_test.go b/plugin/pkg/admission/gc/gc_admission_test.go index 621cab4baba..b37dbe17c6c 100644 --- a/plugin/pkg/admission/gc/gc_admission_test.go +++ b/plugin/pkg/admission/gc/gc_admission_test.go @@ -202,7 +202,7 @@ func TestGCAdmission(t *testing.T) { operation = admission.Update } user := &user.DefaultInfo{Name: tc.username} - attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, api.NamespaceDefault, "foo", tc.resource, "", operation, user) + attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, "", operation, user) err := gcAdmit.Admit(attributes) switch { diff --git a/plugin/pkg/admission/initialresources/hawkular.go b/plugin/pkg/admission/initialresources/hawkular.go index 4d2401ca45d..63d2f21b464 100644 --- a/plugin/pkg/admission/initialresources/hawkular.go 
+++ b/plugin/pkg/admission/initialresources/hawkular.go @@ -29,6 +29,7 @@ import ( "github.com/golang/glog" "github.com/hawkular/hawkular-client-go/metrics" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" restclient "k8s.io/client-go/rest" @@ -90,7 +91,7 @@ func (hs *hawkularSource) GetUsagePercentile(kind api.ResourceName, perc int64, m := make([]metrics.Modifier, len(hs.modifiers), 2+len(hs.modifiers)) copy(m, hs.modifiers) - if namespace != api.NamespaceAll { + if namespace != metav1.NamespaceAll { m = append(m, metrics.Tenant(namespace)) } diff --git a/plugin/pkg/admission/limitranger/BUILD b/plugin/pkg/admission/limitranger/BUILD index d77ae6442a4..911f88410ec 100644 --- a/plugin/pkg/admission/limitranger/BUILD +++ b/plugin/pkg/admission/limitranger/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/kubeapiserver/admission:go_default_library", "//vendor:github.com/hashicorp/golang-lru", "//vendor:k8s.io/apimachinery/pkg/api/meta", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/errors", diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index 5e41e8f77cd..8515a7cd0fa 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -26,6 +26,7 @@ import ( lru "github.com/hashicorp/golang-lru" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -112,7 +113,7 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) { // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. 
- liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{}) + liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{}) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/lifecycle/admission.go b/plugin/pkg/admission/namespace/lifecycle/admission.go index 9981707aab2..a6c04a89f29 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission.go +++ b/plugin/pkg/admission/namespace/lifecycle/admission.go @@ -51,7 +51,7 @@ const ( func init() { admission.RegisterPlugin(PluginName, func(config io.Reader) (admission.Interface, error) { - return NewLifecycle(sets.NewString(api.NamespaceDefault, api.NamespaceSystem)) + return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem)) }) } diff --git a/plugin/pkg/admission/namespace/lifecycle/admission_test.go b/plugin/pkg/admission/namespace/lifecycle/admission_test.go index a02a8addba8..71efd4f4e96 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission_test.go +++ b/plugin/pkg/admission/namespace/lifecycle/admission_test.go @@ -43,7 +43,7 @@ func newHandlerForTest(c clientset.Interface) (admission.Interface, informers.Sh // newHandlerForTestWithClock returns a configured handler for testing. 
func newHandlerForTestWithClock(c clientset.Interface, cacheClock clock.Clock) (admission.Interface, informers.SharedInformerFactory, error) { f := informers.NewSharedInformerFactory(nil, c, 5*time.Minute) - handler, err := newLifecycleWithClock(sets.NewString(api.NamespaceDefault, api.NamespaceSystem), cacheClock) + handler, err := newLifecycleWithClock(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem), cacheClock) if err != nil { return nil, f, err } @@ -168,7 +168,7 @@ func TestAdmissionNamespaceTerminating(t *testing.T) { } // verify delete of namespace default can never proceed - err = handler.Admit(admission.NewAttributesRecord(nil, nil, api.Kind("Namespace").WithVersion("version"), "", api.NamespaceDefault, api.Resource("namespaces").WithVersion("version"), "", admission.Delete, nil)) + err = handler.Admit(admission.NewAttributesRecord(nil, nil, api.Kind("Namespace").WithVersion("version"), "", metav1.NamespaceDefault, api.Resource("namespaces").WithVersion("version"), "", admission.Delete, nil)) if err == nil { t.Errorf("Expected an error that this namespace can never be deleted") } diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index e6de37bb494..e4a6b6a85b4 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -19,7 +19,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/cache:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index b272b45de10..2c35bc642b7 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -24,14 +24,12 @@ import ( lru "github.com/hashicorp/golang-lru" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/storage/etcd" ) @@ -75,15 +73,11 @@ func newQuotaAccessor(client clientset.Interface) (*quotaAccessor, error) { return nil, err } lw := &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Core().ResourceQuotas(api.NamespaceAll).List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Core().ResourceQuotas(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Core().ResourceQuotas(api.NamespaceAll).Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Core().ResourceQuotas(metav1.NamespaceAll).Watch(options) }, } indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0) @@ -156,7 +150,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. 
- liveList, err := e.client.Core().ResourceQuotas(namespace).List(api.ListOptions{}) + liveList, err := e.client.Core().ResourceQuotas(namespace).List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/security/podsecuritypolicy/BUILD b/plugin/pkg/admission/security/podsecuritypolicy/BUILD index bf90f9663cb..bc3d2f63983 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/BUILD +++ b/plugin/pkg/admission/security/podsecuritypolicy/BUILD @@ -14,7 +14,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/cache:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", @@ -26,6 +25,7 @@ go_library( "//pkg/util/maps:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/util/validation/field", "//vendor:k8s.io/apimachinery/pkg/watch", diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/plugin/pkg/admission/security/podsecuritypolicy/admission.go index 1b1e7877e35..39d45e75091 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/watch" @@ -31,7 +32,6 @@ import ( "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -108,15 +108,11 @@ func (a *podSecurityPolicyPlugin) SetInternalClientSet(client internalclientset. a.store = cache.NewStore(cache.MetaNamespaceKeyFunc) a.reflector = cache.NewReflector( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Extensions().PodSecurityPolicies().List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Extensions().PodSecurityPolicies().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Extensions().PodSecurityPolicies().Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Extensions().PodSecurityPolicies().Watch(options) }, }, &extensions.PodSecurityPolicy{}, diff --git a/plugin/pkg/admission/serviceaccount/BUILD b/plugin/pkg/admission/serviceaccount/BUILD index ae68b1fb6c3..7706d7dc8d6 100644 --- a/plugin/pkg/admission/serviceaccount/BUILD +++ b/plugin/pkg/admission/serviceaccount/BUILD @@ -17,7 +17,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/client/cache:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 72934f580a1..cf68bfb9ed8 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -33,7 +33,6 @@ import ( "k8s.io/apiserver/pkg/admission" 
"k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" @@ -108,15 +107,11 @@ func (a *serviceAccount) SetInternalClientSet(cl internalclientset.Interface) { a.client = cl a.serviceAccounts, a.serviceAccountsReflector = cache.NewNamespaceKeyedIndexerAndReflector( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return cl.Core().ServiceAccounts(api.NamespaceAll).List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return cl.Core().ServiceAccounts(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return cl.Core().ServiceAccounts(api.NamespaceAll).Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return cl.Core().ServiceAccounts(metav1.NamespaceAll).Watch(options) }, }, &api.ServiceAccount{}, @@ -126,17 +121,13 @@ func (a *serviceAccount) SetInternalClientSet(cl internalclientset.Interface) { tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) a.secrets, a.secretsReflector = cache.NewNamespaceKeyedIndexerAndReflector( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - internalOptions.FieldSelector = tokenSelector - return cl.Core().Secrets(api.NamespaceAll).List(internalOptions) + ListFunc: func(options 
metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = tokenSelector.String() + return cl.Core().Secrets(metav1.NamespaceAll).List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - internalOptions.FieldSelector = tokenSelector - return cl.Core().Secrets(api.NamespaceAll).Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = tokenSelector.String() + return cl.Core().Secrets(metav1.NamespaceAll).Watch(options) }, }, &api.Secret{}, diff --git a/plugin/pkg/admission/storageclass/default/BUILD b/plugin/pkg/admission/storageclass/default/BUILD index ae91e4d6d86..8fce57f4dc8 100644 --- a/plugin/pkg/admission/storageclass/default/BUILD +++ b/plugin/pkg/admission/storageclass/default/BUILD @@ -14,7 +14,6 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/util:go_default_library", "//pkg/client/cache:go_default_library", @@ -22,6 +21,7 @@ go_library( "//pkg/kubeapiserver/admission:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/watch", "//vendor:k8s.io/apiserver/pkg/admission", diff --git a/plugin/pkg/admission/storageclass/default/admission.go b/plugin/pkg/admission/storageclass/default/admission.go index 9e7eee0b35a..2ae18ec653d 100644 --- a/plugin/pkg/admission/storageclass/default/admission.go +++ b/plugin/pkg/admission/storageclass/default/admission.go @@ -23,11 +23,11 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/watch" admission "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" "k8s.io/kubernetes/pkg/client/cache" @@ -71,15 +71,11 @@ func (a *claimDefaulterPlugin) SetInternalClientSet(client internalclientset.Int a.store = cache.NewStore(cache.MetaNamespaceKeyFunc) a.reflector = cache.NewReflector( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Storage().StorageClasses().List(internalOptions) + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return client.Storage().StorageClasses().List(options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - internalOptions := api.ListOptions{} - v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil) - return client.Storage().StorageClasses().Watch(internalOptions) + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.Storage().StorageClasses().Watch(options) }, }, &storage.StorageClass{}, diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 60d87f00b8c..226d755746a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -99,7 +99,7 @@ func TestSelectorSpreadPriority(t *testing.T) { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, pods: []*v1.Pod{ {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, 
Namespace: metav1.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, @@ -110,15 +110,15 @@ func TestSelectorSpreadPriority(t *testing.T) { test: "five pods, one service pod in no namespace", }, { - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, pods: []*v1.Pod{ {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, - services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: v1.NamespaceDefault}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "four pods, one service pod in default namespace", }, @@ -126,7 +126,7 @@ func TestSelectorSpreadPriority(t *testing.T) { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, pods: []*v1.Pod{ {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: 
labels1, Namespace: "ns2"}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, @@ -606,15 +606,15 @@ func TestZoneSpreadPriority(t *testing.T) { test: "three pods, two service pods on different machines", }, { - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, pods: []*v1.Pod{ {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, }, nodes: labeledNodes, - services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: v1.NamespaceDefault}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0}, {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 1affa51db12..adf59fee3d3 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -548,7 +548,7 @@ func getNodeConditionPredicate() listers.NodeConditionPredicate { // scheduled. 
func (factory *ConfigFactory) createUnassignedNonTerminatedPodLW() *cache.ListWatch { selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "pods", v1.NamespaceAll, selector) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "pods", metav1.NamespaceAll, selector) } // Returns a cache.ListWatch that finds all pods that are @@ -556,7 +556,7 @@ func (factory *ConfigFactory) createUnassignedNonTerminatedPodLW() *cache.ListWa // TODO: return a ListerWatcher interface instead? func (factory *ConfigFactory) createAssignedNonTerminatedPodLW() *cache.ListWatch { selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "pods", v1.NamespaceAll, selector) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "pods", metav1.NamespaceAll, selector) } // createNodeLW returns a cache.ListWatch that gets all changes to nodes. @@ -564,32 +564,32 @@ func (factory *ConfigFactory) createNodeLW() *cache.ListWatch { // all nodes are considered to ensure that the scheduler cache has access to all nodes for lookups // the NodeCondition is used to filter out the nodes that are not ready or unschedulable // the filtered list is used as the super set of nodes to consider for scheduling - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "nodes", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } // createPersistentVolumeLW returns a cache.ListWatch that gets all changes to persistentVolumes. 
func (factory *ConfigFactory) createPersistentVolumeLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "persistentVolumes", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "persistentVolumes", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } // createPersistentVolumeClaimLW returns a cache.ListWatch that gets all changes to persistentVolumeClaims. func (factory *ConfigFactory) createPersistentVolumeClaimLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "persistentVolumeClaims", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "persistentVolumeClaims", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to services. func (factory *ConfigFactory) createServiceLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "services", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "services", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to controllers. func (factory *ConfigFactory) createControllerLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "replicationControllers", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Core().RESTClient(), "replicationControllers", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to replicasets. 
func (factory *ConfigFactory) createReplicaSetLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.client.Extensions().RESTClient(), "replicasets", v1.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.client.Extensions().RESTClient(), "replicasets", metav1.NamespaceAll, fields.ParseSelectorOrDie("")) } func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) { diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index 4f646a9fbdb..04d419eec34 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -203,7 +203,7 @@ func TestBind(t *testing.T) { }{ {binding: &v1.Binding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Name: "foo", }, Target: v1.ObjectReference{ @@ -228,7 +228,7 @@ func TestBind(t *testing.T) { continue } expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding) - handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", v1.NamespaceDefault, ""), "POST", &expectedBody) + handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", metav1.NamespaceDefault, ""), "POST", &expectedBody) } } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go index b6e7d0dba8f..cc64aa19452 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -63,7 +63,7 @@ const ( // userAgentKey is the context key for the request user agent. 
userAgentKey - namespaceDefault = "default" // TODO(sttts): solve import cycle when using api.NamespaceDefault + namespaceDefault = "default" // TODO(sttts): solve import cycle when using metav1.NamespaceDefault ) // NewContext instantiates a base context object for request flows. diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go index c5e56564acf..365cb114e44 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go @@ -19,10 +19,10 @@ package request_test import ( "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/authentication/user" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/api" ) // TestNamespaceContext validates that a namespace can be get/set on a context object @@ -32,8 +32,8 @@ func TestNamespaceContext(t *testing.T) { if !ok { t.Fatalf("Error getting namespace") } - if api.NamespaceDefault != result { - t.Fatalf("Expected: %s, Actual: %s", api.NamespaceDefault, result) + if metav1.NamespaceDefault != result { + t.Fatalf("Expected: %s, Actual: %s", metav1.NamespaceDefault, result) } ctx = genericapirequest.NewContext() diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 4f231319e9d..bbfc547afab 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -176,7 +176,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er } } } else { - requestInfo.Namespace = "" // TODO(sttts): solve import cycle when using api.NamespaceNone + requestInfo.Namespace = "" // TODO(sttts): solve import cycle when using metav1.NamespaceNone } // parsing successful, 
so we now know the proper value for .Parts diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go index 6e8550f42da..20ddb4b48db 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go @@ -31,7 +31,7 @@ func (f fakeRL) TryAccept() bool { return bool(f) } func (f fakeRL) Accept() {} func TestGetAPIRequestInfo(t *testing.T) { - namespaceAll := "" // TODO(sttts): solve import cycle when using api.NamespaceAll + namespaceAll := "" // TODO(sttts): solve import cycle when using metav1.NamespaceAll successCases := []struct { method string url string diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go index f66bb862333..defbf263fbd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -19,7 +19,6 @@ package fake import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/testing" ) @@ -67,9 +66,9 @@ func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1. // Search returns a list of events matching the specified object. 
func (c *FakeEvents) Search(objOrRef runtime.Object) (*v1.EventList, error) { - action := testing.NewRootListAction(eventsResource, api.ListOptions{}) + action := testing.NewRootListAction(eventsResource, metav1.ListOptions{}) if c.ns != "" { - action = testing.NewListAction(eventsResource, c.ns, api.ListOptions{}) + action = testing.NewListAction(eventsResource, c.ns, metav1.ListOptions{}) } obj, err := c.Fake.Invokes(action, &v1.EventList{}) if obj == nil { diff --git a/staging/src/k8s.io/client-go/pkg/apis/kubeadm/register.go b/staging/src/k8s.io/client-go/pkg/apis/kubeadm/register.go index 22fdc0c4a09..ebbeaefcf11 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/kubeadm/register.go +++ b/staging/src/k8s.io/client-go/pkg/apis/kubeadm/register.go @@ -49,7 +49,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MasterConfiguration{}, &NodeConfiguration{}, &ClusterInfo{}, - &api.ListOptions{}, + &metav1.ListOptions{}, &api.DeleteOptions{}, &metav1.ExportOptions{}, ) diff --git a/staging/src/k8s.io/client-go/pkg/federation/apis/federation/register.go b/staging/src/k8s.io/client-go/pkg/federation/apis/federation/register.go index 34e24bd5ce2..d8122aff8a2 100644 --- a/staging/src/k8s.io/client-go/pkg/federation/apis/federation/register.go +++ b/staging/src/k8s.io/client-go/pkg/federation/apis/federation/register.go @@ -47,7 +47,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Cluster{}, &ClusterList{}, - &api.ListOptions{}, &api.DeleteOptions{}, ) return nil diff --git a/staging/src/k8s.io/client-go/rest/request_test.go b/staging/src/k8s.io/client-go/rest/request_test.go index 223dc783472..7de9a26f4af 100755 --- a/staging/src/k8s.io/client-go/rest/request_test.go +++ b/staging/src/k8s.io/client-go/rest/request_test.go @@ -242,7 +242,7 @@ func TestRequestVersionedParams(t *testing.T) { func TestRequestVersionedParamsFromListOptions(t *testing.T) { r := &Request{content: ContentConfig{GroupVersion: 
&v1.SchemeGroupVersion}} - r.VersionedParams(&api.ListOptions{ResourceVersion: "1"}, api.ParameterCodec) + r.VersionedParams(&metav1.ListOptions{ResourceVersion: "1"}, api.ParameterCodec) if !reflect.DeepEqual(r.params, url.Values{ "resourceVersion": []string{"1"}, }) { @@ -250,7 +250,7 @@ func TestRequestVersionedParamsFromListOptions(t *testing.T) { } var timeout int64 = 10 - r.VersionedParams(&api.ListOptions{ResourceVersion: "2", TimeoutSeconds: &timeout}, api.ParameterCodec) + r.VersionedParams(&metav1.ListOptions{ResourceVersion: "2", TimeoutSeconds: &timeout}, api.ParameterCodec) if !reflect.DeepEqual(r.params, url.Values{ "resourceVersion": []string{"1", "2"}, "timeoutSeconds": []string{"10"}, diff --git a/staging/src/k8s.io/client-go/testing/actions.go b/staging/src/k8s.io/client-go/testing/actions.go index 44656b2e43d..97b4c91f2bb 100644 --- a/staging/src/k8s.io/client-go/testing/actions.go +++ b/staging/src/k8s.io/client-go/testing/actions.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/pkg/api" "k8s.io/client-go/pkg/api/v1" ) @@ -224,7 +223,7 @@ func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { var err error switch t := opts.(type) { - case api.ListOptions: + case metav1.ListOptions: labelSelector = t.LabelSelector fieldSelector = t.FieldSelector resourceVersion = t.ResourceVersion diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 42870aa69af..db0c4d87ca8 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -213,12 +213,12 @@ go_test( "integration", ], deps = [ - "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/metrics:go_default_library", "//test/e2e/framework:go_default_library", "//vendor:github.com/onsi/ginkgo", 
"//vendor:github.com/onsi/gomega", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", ], ) diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go index 922090c863f..666a2217607 100644 --- a/test/e2e/addon_update.go +++ b/test/e2e/addon_update.go @@ -26,7 +26,6 @@ import ( "golang.org/x/crypto/ssh" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" @@ -206,7 +205,7 @@ spec: const ( addonTestPollInterval = 3 * time.Second addonTestPollTimeout = 5 * time.Minute - defaultNsName = v1.NamespaceDefault + defaultNsName = metav1.NamespaceDefault addonNsName = "kube-system" ) diff --git a/test/e2e/cadvisor.go b/test/e2e/cadvisor.go index fe7568a7307..8fc5c017066 100644 --- a/test/e2e/cadvisor.go +++ b/test/e2e/cadvisor.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" @@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Cadvisor", func() { func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) { // It should be OK to list unschedulable Nodes here. 
By("getting list of nodes") - nodeList, err := c.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var errors []error diff --git a/test/e2e/cluster_logging_es.go b/test/e2e/cluster_logging_es.go index 9663f2de5b0..82d7e51bf31 100644 --- a/test/e2e/cluster_logging_es.go +++ b/test/e2e/cluster_logging_es.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -89,7 +88,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu func checkElasticsearchReadiness(f *framework.Framework) error { // Check for the existence of the Elasticsearch service. By("Checking the Elasticsearch service exists.") - s := f.ClientSet.Core().Services(api.NamespaceSystem) + s := f.ClientSet.Core().Services(metav1.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. var err error @@ -104,8 +103,8 @@ func checkElasticsearchReadiness(f *framework.Framework) error { // Wait for the Elasticsearch pods to enter the running state. 
By("Checking to make sure the Elasticsearch pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"})) - options := v1.ListOptions{LabelSelector: label.String()} - pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: label.String()} + pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) @@ -128,7 +127,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error { defer cancel() // Query against the root URL for Elasticsearch. - response := proxyRequest.Namespace(api.NamespaceSystem). + response := proxyRequest.Namespace(metav1.NamespaceSystem). Context(ctx). Name("elasticsearch-logging"). Do() @@ -168,7 +167,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error { ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) defer cancel() - body, err = proxyRequest.Namespace(api.NamespaceSystem). + body, err = proxyRequest.Namespace(metav1.NamespaceSystem). Context(ctx). Name("elasticsearch-logging"). Suffix("_cluster/health"). @@ -219,7 +218,7 @@ func getMissingLinesCountElasticsearch(f *framework.Framework, expectedCount int // Ask Elasticsearch to return all the log lines that were tagged with the // pod name. Ask for ten times as many log lines because duplication is possible. - body, err := proxyRequest.Namespace(api.NamespaceSystem). + body, err := proxyRequest.Namespace(metav1.NamespaceSystem). Context(ctx). Name("elasticsearch-logging"). Suffix("_search"). 
diff --git a/test/e2e/cluster_logging_utils.go b/test/e2e/cluster_logging_utils.go index e36c5eabbd2..62f7899b13f 100644 --- a/test/e2e/cluster_logging_utils.go +++ b/test/e2e/cluster_logging_utils.go @@ -23,7 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/test/e2e/framework" ) @@ -74,13 +73,13 @@ func reportLogsFromFluentdPod(f *framework.Framework) error { } label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"})) - options := v1.ListOptions{LabelSelector: label.String()} - fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: label.String()} + fluentdPods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options) for _, fluentdPod := range fluentdPods.Items { if fluentdPod.Spec.NodeName == synthLoggerNodeName { containerName := fluentdPod.Spec.Containers[0].Name - logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName) + logs, err := framework.GetPodLogs(f.ClientSet, metav1.NamespaceSystem, fluentdPod.Name, containerName) if err != nil { return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err) } diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go index 3594169a384..55ebca22954 100644 --- a/test/e2e/cluster_size_autoscaling.go +++ b/test/e2e/cluster_size_autoscaling.go @@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { By("Waiting for NotTriggerScaleUp event") - events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{}) + events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(metav1.ListOptions{}) framework.ExpectNoError(err) 
for _, e := range events.Items { @@ -565,7 +565,7 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e // WaitForClusterSize waits until the cluster size matches the given function. func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -592,7 +592,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { - pods, err := c.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}) + pods, err := c.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go index 6caf7cc347d..f6072f66bdc 100644 --- a/test/e2e/common/init_container.go +++ b/test/e2e/common/init_container.go @@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { Expect(err).To(BeNil()) } startedPod := podClient.Create(pod) - w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) Expect(err).NotTo(HaveOccurred(), "error watching a pod") wr := watch.NewRecorder(w) event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted) @@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { Expect(err).To(BeNil()) } startedPod := podClient.Create(pod) - w, err := 
podClient.Watch(v1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) Expect(err).NotTo(HaveOccurred(), "error watching a pod") wr := watch.NewRecorder(w) event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning) @@ -221,7 +221,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { Expect(err).To(BeNil()) } startedPod := podClient.Create(pod) - w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) Expect(err).NotTo(HaveOccurred(), "error watching a pod") wr := watch.NewRecorder(w) @@ -345,7 +345,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { } startedPod := podClient.Create(pod) - w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) Expect(err).NotTo(HaveOccurred(), "error watching a pod") wr := watch.NewRecorder(w) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index f3507653ad5..fcb15256cc3 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -167,11 +167,11 @@ var _ = framework.KubeDescribe("Pods", func() { By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(0)) - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } @@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options = v1.ListOptions{LabelSelector: selector.String()} + options 
= metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(1)) @@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Pods", func() { Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options = v1.ListOptions{LabelSelector: selector.String()} + options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(0)) @@ -288,7 +288,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(1)) @@ -303,7 +303,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying the updated pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options = v1.ListOptions{LabelSelector: selector.String()} + options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(1)) @@ -337,7 +337,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") 
Expect(len(pods.Items)).To(Equal(1)) diff --git a/test/e2e/cronjob.go b/test/e2e/cronjob.go index 13f14bb4139..d759c9cd55b 100644 --- a/test/e2e/cronjob.go +++ b/test/e2e/cronjob.go @@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("CronJob", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring at least two running jobs exists by listing jobs explicitly") - jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(len(activeJobs) >= 2).To(BeTrue()) @@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("CronJob", func() { Expect(err).To(HaveOccurred()) By("Ensuring no job exists by listing jobs explicitly") - jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(jobs.Items).To(HaveLen(0)) @@ -117,7 +117,7 @@ var _ = framework.KubeDescribe("CronJob", func() { Expect(cronJob.Status.Active).Should(HaveLen(1)) By("Ensuring exaclty one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(activeJobs).To(HaveLen(1)) @@ -148,7 +148,7 @@ var _ = framework.KubeDescribe("CronJob", func() { Expect(cronJob.Status.Active).Should(HaveLen(1)) By("Ensuring exaclty one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(activeJobs).To(HaveLen(1)) @@ -322,7 +322,7 @@ func waitForNoJobs(c 
clientset.Interface, ns, jobName string, failIfNonEmpty boo // Wait for a job to be replaced with a new one. func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{}) + jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{}) if err != nil { return false, err } @@ -339,7 +339,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error // waitForJobsAtLeast waits for at least a number of jobs to appear. func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{}) + jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{}) if err != nil { return false, err } @@ -350,7 +350,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { // waitForAnyFinishedJob waits for any completed job to appear. func waitForAnyFinishedJob(c clientset.Interface, ns string) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{}) + jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go index d1f5284cc47..990d7824f8f 100644 --- a/test/e2e/daemon_restart.go +++ b/test/e2e/daemon_restart.go @@ -21,6 +21,7 @@ import ( "strconv" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -170,7 +171,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) { // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // and a list of nodenames across which these containers restarted. 
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { - options := v1.ListOptions{LabelSelector: labelSelector.String()} + options := metav1.ListOptions{LabelSelector: labelSelector.String()} pods, err := c.Core().Pods(ns).List(options) framework.ExpectNoError(err) failedContainers := 0 @@ -220,12 +221,12 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { tracker = newPodTracker() newPods, controller = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector.String() obj, err := f.ClientSet.Core().Pods(ns).List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector.String() return f.ClientSet.Core().Pods(ns).Watch(options) }, diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index 01e969832d4..201c9eaf64c 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -59,12 +59,12 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { var f *framework.Framework AfterEach(func() { - if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(v1.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}); err == nil { + if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { framework.Logf("pods: %s", 
runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), pods)) } else { framework.Logf("unable to dump pods: %v", err) @@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { podClient := c.Core().Pods(ns) selector := labels.Set(label).AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := podClient.List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(podList.Items)).To(BeNumerically(">", 0)) @@ -345,7 +345,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) { return func() (bool, error) { selector := labels.Set(selector).AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options) if err != nil { return false, nil @@ -374,7 +374,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) { return func() (bool, error) { - nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { diff --git a/test/e2e/dashboard.go b/test/e2e/dashboard.go index 724432df872..f0558da479b 100644 --- a/test/e2e/dashboard.go +++ b/test/e2e/dashboard.go @@ -21,9 +21,9 @@ import ( "net/http" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -35,7 
+35,7 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() { const ( uiServiceName = "kubernetes-dashboard" uiAppName = uiServiceName - uiNamespace = api.NamespaceSystem + uiNamespace = metav1.NamespaceSystem serverStartTimeout = 1 * time.Minute ) diff --git a/test/e2e/density.go b/test/e2e/density.go index fb366ec51a5..8a55e006d6e 100644 --- a/test/e2e/density.go +++ b/test/e2e/density.go @@ -171,7 +171,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) { label := labels.SelectorFromSet(labels.Set(observedLabels)) - podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything()) + podStore := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything()) defer podStore.Stop() ticker := time.NewTicker(period) defer ticker.Stop() @@ -228,12 +228,12 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { // Print some data about Pod to Node allocation By("Printing Pod to Node allocation data") - podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}) + podList, err := dtc.ClientSet.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) framework.ExpectNoError(err) pausePodAllocation := make(map[string]int) systemPodAllocation := make(map[string][]string) for _, pod := range podList.Items { - if pod.Namespace == api.NamespaceSystem { + if pod.Namespace == metav1.NamespaceSystem { systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name) } else { pausePodAllocation[pod.Spec.NodeName]++ @@ -565,12 +565,12 @@ var _ = framework.KubeDescribe("Density", func() { nsName := namespaces[i].Name latencyPodsStore, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) 
(runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String() obj, err := c.Core().Pods(nsName).List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String() return c.Core().Pods(nsName).Watch(options) }, @@ -655,7 +655,7 @@ var _ = framework.KubeDescribe("Density", func() { "involvedObject.namespace": nsName, "source": v1.DefaultSchedulerName, }.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector} schedEvents, err := c.Core().Events(nsName).List(options) framework.ExpectNoError(err) for k := range createTimes { diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index 3087294dff5..a1c2e567022 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -195,7 +195,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} rss, err := c.Extensions().ReplicaSets(ns).List(options) Expect(err).NotTo(HaveOccurred()) // RSes may be created by overlapping deployments right after this deployment is deleted, ignore them @@ -412,11 +412,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { deploymentName := "test-cleanup-deployment" framework.Logf("Creating deployment %s", deploymentName) - pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()}) + pods, err := 
c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err) } - options := v1.ListOptions{ + options := metav1.ListOptions{ ResourceVersion: pods.ListMeta.ResourceVersion, } stopCh := make(chan struct{}) @@ -572,7 +572,7 @@ func testPausedDeployment(f *framework.Framework) { if err != nil { Expect(err).NotTo(HaveOccurred()) } - opts := v1.ListOptions{LabelSelector: selector.String()} + opts := metav1.ListOptions{LabelSelector: selector.String()} w, err := c.Extensions().ReplicaSets(ns).Watch(opts) Expect(err).NotTo(HaveOccurred()) @@ -916,7 +916,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) { // All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := c.Core().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) err = framework.CheckPodHashLabel(pods) @@ -1163,7 +1163,7 @@ func testOverlappingDeployment(f *framework.Framework) { // Only the first deployment is synced By("Checking only the first overlapping deployment is synced") - options := v1.ListOptions{} + options := metav1.ListOptions{} rsList, err := c.Extensions().ReplicaSets(ns).List(options) Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns) Expect(rsList.Items).To(HaveLen(int(replicas))) @@ -1365,7 +1365,7 @@ func testIterativeDeployments(f *framework.Framework) { framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) - opts := v1.ListOptions{LabelSelector: 
selector.String()} + opts := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.Core().Pods(ns).List(opts) Expect(err).NotTo(HaveOccurred()) if len(podList.Items) == 0 { diff --git a/test/e2e/dns.go b/test/e2e/dns.go index 7010652fff8..37bed43c009 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -288,9 +288,9 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames } func verifyDNSPodIsRunning(f *framework.Framework) { - systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem) + systemClient := f.ClientSet.Core().Pods(metav1.NamespaceSystem) By("Waiting for DNS Service to be Running") - options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()} + options := metav1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()} dnsPods, err := systemClient.List(options) if err != nil { framework.Failf("Failed to list all dns service pods") diff --git a/test/e2e/dns_autoscaling.go b/test/e2e/dns_autoscaling.go index c3868378cb6..04a05a541bb 100644 --- a/test/e2e/dns_autoscaling.go +++ b/test/e2e/dns_autoscaling.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -89,7 +88,7 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() { By("Wait for number of running and ready kube-dns pods recover") label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) - _, err := framework.WaitForPodsWithLabelRunningReady(c, api.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) + _, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) Expect(err).NotTo(HaveOccurred()) }() By("Wait for kube-dns scaled to expected number") 
@@ -231,7 +230,7 @@ func getScheduableCores(nodes []v1.Node) int64 { } func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { - cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{}) + cm, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{}) if err != nil { return nil, err } @@ -239,7 +238,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { } func deleteDNSScalingConfigMap(c clientset.Interface) error { - if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil { + if err := c.Core().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil { return err } framework.Logf("DNS autoscaling ConfigMap deleted.") @@ -259,13 +258,13 @@ func packLinearParams(params *DNSParamsLinear) map[string]string { func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap { configMap := v1.ConfigMap{} configMap.ObjectMeta.Name = DNSAutoscalerLabelName - configMap.ObjectMeta.Namespace = api.NamespaceSystem + configMap.ObjectMeta.Namespace = metav1.NamespaceSystem configMap.Data = params return &configMap } func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { - _, err := c.Core().ConfigMaps(api.NamespaceSystem).Update(configMap) + _, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Update(configMap) if err != nil { return err } @@ -275,8 +274,8 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e func getDNSReplicas(c clientset.Interface) (int, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) - listOpts := v1.ListOptions{LabelSelector: label.String()} - deployments, err := c.Extensions().Deployments(api.NamespaceSystem).List(listOpts) + listOpts := metav1.ListOptions{LabelSelector: label.String()} + deployments, err := 
c.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts) if err != nil { return 0, err } @@ -290,8 +289,8 @@ func getDNSReplicas(c clientset.Interface) (int, error) { func deleteDNSAutoscalerPod(c clientset.Interface) error { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) - listOpts := v1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(api.NamespaceSystem).List(listOpts) + listOpts := metav1.ListOptions{LabelSelector: label.String()} + pods, err := c.Core().Pods(metav1.NamespaceSystem).List(listOpts) if err != nil { return err } @@ -300,7 +299,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error { } podName := pods.Items[0].Name - if err := c.Core().Pods(api.NamespaceSystem).Delete(podName, nil); err != nil { + if err := c.Core().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil { return err } framework.Logf("DNS autoscaling pod %v deleted.", podName) diff --git a/test/e2e/dns_configmap.go b/test/e2e/dns_configmap.go index 3421560c9eb..a2021ab9a78 100644 --- a/test/e2e/dns_configmap.go +++ b/test/e2e/dns_configmap.go @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("DNS config map", func() { func (t *dnsConfigMapTest) init() { By("Finding a DNS pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options) Expect(err).NotTo(HaveOccurred()) @@ -218,7 +218,7 @@ func (t *dnsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]stri cm.ObjectMeta.Namespace = t.ns cm.ObjectMeta.Name = t.name - options := v1.ListOptions{ + options := metav1.ListOptions{ FieldSelector: fields.Set{ "metadata.namespace": t.ns, "metadata.name": t.name, diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 14d9fb6ed4a..962d29016ef 100644 --- 
a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -30,6 +30,7 @@ import ( "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" utilyaml "k8s.io/apimachinery/pkg/util/yaml" @@ -110,7 +111,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Delete any namespaces except default and kube-system. This ensures no // lingering resources are left over from a previous test run. if framework.TestContext.CleanStart { - deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault, federationapi.FederationNamespaceSystem}) + deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{metav1.NamespaceSystem, metav1.NamespaceDefault, federationapi.FederationNamespaceSystem}) if err != nil { framework.Failf("Error deleting orphaned namespaces: %v", err) } @@ -130,14 +131,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // test pods from running, and tests that ensure all pods are running and // ready will fail). 
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout - if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil { - framework.DumpAllNamespaceInfo(c, api.NamespaceSystem) - framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf) - runKubernetesServiceTestContainer(c, v1.NamespaceDefault) + if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil { + framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) + framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) framework.Failf("Error waiting for all pods to be running and ready: %v", err) } - if err := framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil { + if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil { // There is no guarantee that the image pulling will succeed in 3 minutes // and we don't even run the image puller on all platforms (including GKE). // We wait for it so we get an indication of failures in the logs, and to @@ -148,7 +149,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Dump the output of the nethealth containers only once per run if framework.TestContext.DumpLogsOnFailure { framework.Logf("Dumping network health container logs from all nodes") - framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth", framework.Logf) + framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", framework.Logf) } // Reference common test to make the import valid. 
diff --git a/test/e2e/etcd_failure.go b/test/e2e/etcd_failure.go index 20ade3ad302..8c4ac9bbe36 100644 --- a/test/e2e/etcd_failure.go +++ b/test/e2e/etcd_failure.go @@ -19,6 +19,7 @@ package e2e import ( "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/api/v1" @@ -106,7 +107,7 @@ func checkExistingRCRecovers(f *framework.Framework) { By("deleting pods from existing replication controller") framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { - options := v1.ListOptions{LabelSelector: rcSelector.String()} + options := metav1.ListOptions{LabelSelector: rcSelector.String()} pods, err := podClient.List(options) if err != nil { framework.Logf("apiserver returned error, as expected before recovery: %v", err) @@ -125,7 +126,7 @@ func checkExistingRCRecovers(f *framework.Framework) { By("waiting for replication controller to recover") framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { - options := v1.ListOptions{LabelSelector: rcSelector.String()} + options := metav1.ListOptions{LabelSelector: rcSelector.String()} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { diff --git a/test/e2e/events.go b/test/e2e/events.go index 26d31c40e20..057683306c2 100644 --- a/test/e2e/events.go +++ b/test/e2e/events.go @@ -75,7 +75,7 @@ var _ = framework.KubeDescribe("Events", func() { By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) Expect(len(pods.Items)).To(Equal(1)) @@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Events", func() { "involvedObject.namespace": f.Namespace.Name, "source": v1.DefaultSchedulerName, 
}.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector} events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) if err != nil { return false, err @@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Events", func() { "involvedObject.namespace": f.Namespace.Name, "source": "kubelet", }.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector} events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options) if err != nil { return false, err diff --git a/test/e2e/example_cluster_dns.go b/test/e2e/example_cluster_dns.go index e2d06e652c8..251ec6529ed 100644 --- a/test/e2e/example_cluster_dns.go +++ b/test/e2e/example_cluster_dns.go @@ -21,6 +21,7 @@ import ( "path/filepath" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" @@ -98,7 +99,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { // the application itself may have not been initialized. Just query the application. for _, ns := range namespaces { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns.Name).List(options) Expect(err).NotTo(HaveOccurred()) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) @@ -118,7 +119,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { // dns error or timeout. // This code is probably unnecessary, but let's stay on the safe side. 
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(namespaces[0].Name).List(options) if err != nil || pods == nil || len(pods.Items) == 0 { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 36de2ab84e5..bb1b76a1c6c 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -298,7 +298,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()}) if err != nil { return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label) } diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 619d2129071..a3e4874bef1 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -282,7 +282,7 @@ func (f *Framework) AfterEach() { // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client. DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) By(fmt.Sprintf("Dumping a list of prepulled images on each node")) - LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) + LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) } summaries := make([]TestDataSummary, 0) @@ -404,7 +404,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error { for { // TODO: Endpoints client should take a field selector so we // don't have to list everything. 
- list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(v1.ListOptions{}) + list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return err } @@ -419,7 +419,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error { } } - options := v1.ListOptions{ + options := metav1.ListOptions{ FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector().String(), ResourceVersion: rv, } @@ -728,10 +728,10 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin // everything manually. if len(selectors) > 0 { selector = labels.SelectorFromSet(labels.Set(selectors)) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pl, err = cli.Core().Pods(ns).List(options) } else { - pl, err = cli.Core().Pods(ns).List(v1.ListOptions{}) + pl, err = cli.Core().Pods(ns).List(metav1.ListOptions{}) } return pl, err } diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index 25a1cfcba09..04571ef8072 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -30,10 +30,10 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/prometheus/common/model" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -158,7 +158,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor client: c, nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), } - nodes, err := m.client.Core().Nodes().List(v1.ListOptions{}) + nodes, err := m.client.Core().Nodes().List(metav1.ListOptions{}) if err != nil { 
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) } @@ -701,7 +701,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI func (r *ResourceMonitor) Start() { // It should be OK to monitor unschedulable Nodes - nodes, err := r.client.Core().Nodes().List(v1.ListOptions{}) + nodes, err := r.client.Core().Nodes().List(metav1.ListOptions{}) if err != nil { Failf("ResourceMonitor: unable to get list of nodes: %v", err) } diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index 19f9ecdf1e8..9b94bdcb663 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -28,9 +28,8 @@ import ( "strings" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/metrics" @@ -325,7 +324,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) { result := SchedulingLatency{} // Check if master Node is registered - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) ExpectNoError(err) var data string @@ -342,7 +341,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) { rawData, err := c.Core().RESTClient().Get(). Context(ctx). Prefix("proxy"). - Namespace(api.NamespaceSystem). + Namespace(metav1.NamespaceSystem). Resource("pods"). Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)). Suffix("metrics"). 
diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 88d09d455f8..4eac6c44ebc 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -501,7 +501,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) { func (config *NetworkingTestConfig) cleanup() { nsClient := config.getNamespacesClient() - nsList, err := nsClient.List(v1.ListOptions{}) + nsList, err := nsClient.List(metav1.ListOptions{}) if err == nil { for _, ns := range nsList.Items { if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace { diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 3ca7e04bec4..272e37809d0 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -23,6 +23,7 @@ import ( "strings" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/api/v1" @@ -153,7 +154,7 @@ func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]str // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // knows about all of the nodes. Thus, we retry the list nodes call // until we get the expected number of nodes. 
- nodeList, errLast = c.Core().Nodes().List(v1.ListOptions{ + nodeList, errLast = c.Core().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()}) if errLast != nil { return false, nil diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go index 135b26c6cee..96f4375c2f6 100644 --- a/test/e2e/framework/resource_usage_gatherer.go +++ b/test/e2e/framework/resource_usage_gatherer.go @@ -27,8 +27,8 @@ import ( "text/tabwriter" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/util/system" ) @@ -232,7 +232,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt finished: false, }) } else { - pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{}) + pods, err := c.Core().Pods("kube-system").List(metav1.ListOptions{}) if err != nil { Logf("Error while listing Pods: %v", err) return nil, err @@ -244,7 +244,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt g.containerIDs = append(g.containerIDs, containerID) } } - nodeList, err := c.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("Error while listing Nodes: %v", err) return nil, err diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index a792234b3e0..0bc030fade8 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -552,7 +552,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s label := labels.SelectorFromSet(labels.Set(j.Labels)) Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - options := 
v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := j.Client.Core().Pods(namespace).List(options) if err != nil { return nil, err @@ -1001,7 +1001,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin i++ } - if pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}); err == nil { + if pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil { for _, pod := range pods.Items { Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index b142549e743..6b721a25892 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -365,7 +365,7 @@ func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersio Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err) } apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true} - _, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) + _, err = dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { @@ -451,7 +451,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0 if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { - podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: successPodSelector.String()}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return false, nil @@ -520,7 +520,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods 
int32, ti // checked. replicas, replicaOk := int32(0), int32(0) - rcList, err := c.Core().ReplicationControllers(ns).List(v1.ListOptions{}) + rcList, err := c.Core().ReplicationControllers(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting replication controllers in namespace '%s': %v", ns, err) return false, nil @@ -530,7 +530,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti replicaOk += rc.Status.ReadyReplicas } - rsList, err := c.Extensions().ReplicaSets(ns).List(v1.ListOptions{}) + rsList, err := c.Extensions().ReplicaSets(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting replication sets in namespace %q: %v", ns, err) return false, nil @@ -540,7 +540,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti replicaOk += rs.Status.ReadyReplicas } - podList, err := c.Core().Pods(ns).List(v1.ListOptions{}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting pods in namespace '%s': %v", ns, err) return false, nil @@ -613,7 +613,7 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string } func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(v1.ListOptions{}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return @@ -627,7 +627,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri } func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { logFunc("Error getting pods in 
namespace %q: %v", ns, err) return @@ -639,7 +639,7 @@ func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string } func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return @@ -654,7 +654,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s // Returns the list of deleted namespaces or an error. func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") - nsList, err := c.Core().Namespaces().List(v1.ListOptions{}) + nsList, err := c.Core().Namespaces().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) var deleted []string var wg sync.WaitGroup @@ -701,7 +701,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou //Now POLL until all namespaces have been eradicated. 
return wait.Poll(2*time.Second, timeout, func() (bool, error) { - nsList, err := c.Core().Namespaces().List(v1.ListOptions{}) + nsList, err := c.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -715,7 +715,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou } func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { - w, err := c.Core().ServiceAccounts(ns).Watch(v1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) + w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } @@ -750,10 +750,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou // WaitForMatchPodsCondition finds match pods based on the input ListOptions. // waits and checks if all match pods are in the given podCondition -func WaitForMatchPodsCondition(c clientset.Interface, opts v1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { +func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pods, err := c.Core().Pods(v1.NamespaceAll).List(opts) + pods, err := c.Core().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } @@ -901,7 +901,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { - namespaces, err := c.Core().Namespaces().List(v1.ListOptions{}) + namespaces, err := c.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue @@ -984,7 
+984,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st // logNamespaces logs the number of namespaces by phase // namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs func logNamespaces(c clientset.Interface, namespace string) { - namespaceList, err := c.Core().Namespaces().List(v1.ListOptions{}) + namespaceList, err := c.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("namespace: %v, unable to list namespaces: %v", namespace, err) return @@ -1019,7 +1019,7 @@ func logNamespace(c clientset.Interface, namespace string) { // countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { // check for remaining pods - pods, err := c.Core().Pods(namespace).List(v1.ListOptions{}) + pods, err := c.Core().Pods(namespace).List(metav1.ListOptions{}) if err != nil { return 0, 0, err } @@ -1082,7 +1082,7 @@ func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, n Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name) continue } - obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) + obj, err := dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { @@ -1231,7 +1231,7 @@ func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace, } func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) + w, err := 
c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1246,7 +1246,7 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp } func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) + w, err := c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1255,7 +1255,7 @@ func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, } func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) + w, err := c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1267,7 +1267,7 @@ func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. 
func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error { - w, err := c.Core().Pods(ns).Watch(v1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) + w, err := c.Core().Pods(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1326,7 +1326,7 @@ func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod var p *v1.Pod = nil err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { Logf("Waiting for pod %s to appear on node %s", rcName, node) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err @@ -1345,7 +1345,7 @@ func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. 
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { - options := v1.ListOptions{FieldSelector: fields.Set{ + options := metav1.ListOptions{FieldSelector: fields.Set{ "metadata.name": name, "metadata.namespace": ns, }.AsSelector().String()} @@ -1376,7 +1376,7 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err @@ -1440,7 +1440,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) - list, err := c.Core().Endpoints(namespace).List(v1.ListOptions{}) + list, err := c.Core().Endpoints(namespace).List(metav1.ListOptions{}) if err != nil { return false, err } @@ -1514,7 +1514,7 @@ func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele // reply with their own pod name. 
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 - options := v1.ListOptions{LabelSelector: r.label.String()} + options := metav1.ListOptions{LabelSelector: r.label.String()} currentPods, err := r.c.Core().Pods(r.ns).List(options) Expect(err).NotTo(HaveOccurred()) for i, pod := range r.pods.Items { @@ -1651,7 +1651,7 @@ func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.Po func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} // List the pods, making sure we observe all the replicas. pods, err := c.Core().Pods(ns).List(options) @@ -2175,11 +2175,11 @@ func RunRC(config testutils.RCConfig) error { return testutils.RunRC(config) } -type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error) +type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error) func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) - events, err := eventsLister(v1.ListOptions{}, namespace) + events, err := eventsLister(metav1.ListOptions{}, namespace) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Found %d events.", len(events.Items))) @@ -2197,7 +2197,7 @@ func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { } func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { - DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { + DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { return c.Core().Events(ns).List(opts) }, namespace) @@ -2206,7 +2206,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, 
namespace string) { // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := 20 - if nodes, err := c.Core().Nodes().List(v1.ListOptions{}); err == nil { + if nodes, err := c.Core().Nodes().List(metav1.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) dumpAllNodeInfo(c) @@ -2232,7 +2232,7 @@ func (o byFirstTimestamp) Less(i, j int) bool { } func dumpAllPodInfo(c clientset.Interface) { - pods, err := c.Core().Pods("").List(v1.ListOptions{}) + pods, err := c.Core().Pods("").List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } @@ -2241,7 +2241,7 @@ func dumpAllPodInfo(c clientset.Interface) { func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return @@ -2296,11 +2296,11 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": nodeName, - "involvedObject.namespace": v1.NamespaceAll, + "involvedObject.namespace": metav1.NamespaceAll, "source": "kubelet", }.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} - events, err := c.Core().Events(api.NamespaceSystem).List(options) + options := metav1.ListOptions{FieldSelector: selector} + events, err := c.Core().Events(metav1.NamespaceSystem).List(options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []v1.Event{} @@ -2313,7 +2313,7 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList { var nodes *v1.NodeList var err error if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { - nodes, err = c.Core().Nodes().List(v1.ListOptions{FieldSelector: 
fields.Set{ + nodes, err = c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) return err == nil, nil @@ -2386,7 +2386,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { attempt++ notSchedulable = nil - opts := v1.ListOptions{ + opts := metav1.ListOptions{ ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), } @@ -2676,7 +2676,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label // Wait up to PodListTimeout for getting pods with certain label func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err = c.Core().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(pods.Items) > 0 { @@ -3012,7 +3012,7 @@ func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) ExpectNoError(err) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 { return true, nil } @@ -3030,7 +3030,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { if err != nil { return err } - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} podList, err := c.Core().Pods(rs.Namespace).List(options) if err != nil { return err @@ -3256,7 +3256,7 @@ func 
WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type) } - w, err := c.Extensions().Deployments(d.Namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) + w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) if err != nil { return err } @@ -3380,7 +3380,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { pods, err := c.Core().Pods(ns).List(options) if err != nil { @@ -3450,7 +3450,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) { minReadySeconds := deployment.Spec.MinReadySeconds podList, err := deploymentutil.ListPods(deployment, - func(namespace string, options v1.ListOptions) (*v1.PodList, error) { + func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { return c.Core().Pods(namespace).List(options) }) if err != nil { @@ -4038,7 +4038,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error { err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -4084,7 +4084,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -4093,14 +4093,14 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error notReady = append(notReady, node) } } - pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{ResourceVersion: "0"}) + pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } systemPodsPerNode := make(map[string][]string) for _, pod := range pods.Items { - if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == v1.PodRunning { + if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning { if pod.Spec.NodeName != "" { systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name) } @@ -4258,7 +4258,7 @@ func WaitForApiserverUp(c clientset.Interface) error { // By cluster size we mean number of Nodes excluding Master Node. 
func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -4289,7 +4289,7 @@ func GenerateMasterRegexp(prefix string) string { // waitForMasters waits until the cluster has the desired number of ready masters in it. func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("Failed to list nodes: %v", err) continue @@ -4500,7 +4500,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s // kube-proxy NodePorts won't work. var nodes *v1.NodeList if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { - nodes, err = client.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{ + nodes, err = client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) return err == nil, nil @@ -4525,7 +4525,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. 
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error { - listOpts := v1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} + listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts) if err != nil { return err @@ -4884,7 +4884,7 @@ func UpdatePodWithRetries(client clientset.Interface, ns, name string, update fu } func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { - pods, err := c.Core().Pods(ns).List(v1.ListOptions{}) + pods, err := c.Core().Pods(ns).List(metav1.ListOptions{}) if err != nil { return []*v1.Pod{}, err } @@ -4973,7 +4973,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute startTime := time.Now() - allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}) + allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) // API server returns also Pods that succeeded. We need to filter them out. 
currentPods := make([]v1.Pod, 0, len(allPods.Items)) @@ -4988,7 +4988,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) - allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}) + allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) @@ -5004,7 +5004,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) { nodes := &v1.NodeList{} masters := sets.NewString() - all, _ := c.Core().Nodes().List(v1.ListOptions{}) + all, _ := c.Core().Nodes().List(metav1.ListOptions{}) for _, n := range all.Items { if system.IsMasterNode(n.Name) { masters.Insert(n.Name) @@ -5016,7 +5016,7 @@ func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeL } func ListNamespaceEvents(c clientset.Interface, ns string) error { - ls, err := c.Core().Events(ns).List(v1.ListOptions{}) + ls, err := c.Core().Events(ns).List(metav1.ListOptions{}) if err != nil { return err } @@ -5137,7 +5137,7 @@ func getMaster(c clientset.Interface) Address { master := Address{} // Populate the internal IP. 
- eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + eps, err := c.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } diff --git a/test/e2e/garbage_collector.go b/test/e2e/garbage_collector.go index 3729a73f8a1..b8504954908 100644 --- a/test/e2e/garbage_collector.go +++ b/test/e2e/garbage_collector.go @@ -106,7 +106,7 @@ func verifyRemainingDeploymentsAndReplicaSets( deploymentNum, rsNum int, ) (bool, error) { var ret = true - rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{}) + rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list rs: %v", err) } @@ -114,7 +114,7 @@ func verifyRemainingDeploymentsAndReplicaSets( ret = false By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items))) } - deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(v1.ListOptions{}) + deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list deployments: %v", err) } @@ -130,7 +130,7 @@ func verifyRemainingDeploymentsAndReplicaSets( // communication with the API server fails. 
func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) { rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - pods, err := clientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}) + pods, err := clientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -139,7 +139,7 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interfac ret = false By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items))) } - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list replication controllers: %v", err) } @@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { } // wait for rc to create some pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -210,7 +210,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { return verifyRemainingObjects(f, clientSet, 0, 0) }); err != nil { framework.Failf("failed to wait for all pods to be deleted: %v", err) - remainingPods, err := podClient.List(v1.ListOptions{}) + remainingPods, err := podClient.List(metav1.ListOptions{}) if err != nil { framework.Failf("failed to list pods post mortem: %v", err) } else { @@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { } By("wait for the rc to be deleted") if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list rcs: %v", err) } @@ -268,7 +268,7 @@ var _ = 
framework.KubeDescribe("Garbage collector", func() { } By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { } By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -343,7 +343,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { // wait for deployment to create some rs By("Wait for the Deployment to create new ReplicaSet") err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(v1.ListOptions{}) + rsList, err := rsClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list rs: %v", err) } @@ -366,7 +366,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { }) if err == wait.ErrWaitTimeout { err = fmt.Errorf("Failed to wait for all rs to be garbage collected: %v", err) - remainingRSs, err := rsClient.List(v1.ListOptions{}) + remainingRSs, err := rsClient.List(metav1.ListOptions{}) if err != nil { framework.Failf("failed to list RSs post mortem: %v", err) } else { @@ -392,7 +392,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() { // wait for deployment to create some rs By("Wait for the Deployment to create new ReplicaSet") err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(v1.ListOptions{}) + rsList, err := 
rsClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list rs: %v", err) } @@ -415,20 +415,20 @@ var _ = framework.KubeDescribe("Garbage collector", func() { }) if err != nil { err = fmt.Errorf("Failed to wait to see if the garbage collecter mistakenly deletes the rs: %v", err) - remainingRSs, err := rsClient.List(v1.ListOptions{}) + remainingRSs, err := rsClient.List(metav1.ListOptions{}) if err != nil { framework.Failf("failed to list RSs post mortem: %v", err) } else { framework.Failf("remaining rs post mortem: %#v", remainingRSs) } - remainingDSs, err := deployClient.List(v1.ListOptions{}) + remainingDSs, err := deployClient.List(metav1.ListOptions{}) if err != nil { framework.Failf("failed to list Deployments post mortem: %v", err) } else { framework.Failf("remaining deployment's post mortem: %#v", remainingDSs) } } - rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{}) + rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list ReplicaSet %v", err) } diff --git a/test/e2e/generated_clientset.go b/test/e2e/generated_clientset.go index e4fc5268bad..e63b90034f7 100644 --- a/test/e2e/generated_clientset.go +++ b/test/e2e/generated_clientset.go @@ -130,13 +130,13 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { pod := &podCopy By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() - options := v1.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector} pods, err := podClient.List(options) if err != nil { framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(0)) - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector, ResourceVersion: pods.ListMeta.ResourceVersion, } @@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("Generated 
release_1_5 clientset", func() { } By("verifying the pod is in kubernetes") - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector, ResourceVersion: pod.ResourceVersion, } @@ -180,7 +180,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { Expect(lastPod.DeletionTimestamp).ToNot(BeNil()) Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) - options = v1.ListOptions{LabelSelector: selector} + options = metav1.ListOptions{LabelSelector: selector} pods, err = podClient.List(options) if err != nil { framework.Failf("Failed to list pods to verify deletion: %v", err) @@ -264,13 +264,13 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { cronJob := newTestingCronJob(name, value) By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() - options := v1.ListOptions{LabelSelector: selector} + options := metav1.ListOptions{LabelSelector: selector} cronJobs, err := cronJobClient.List(options) if err != nil { framework.Failf("Failed to query for cronJobs: %v", err) } Expect(len(cronJobs.Items)).To(Equal(0)) - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector, ResourceVersion: cronJobs.ListMeta.ResourceVersion, } @@ -286,7 +286,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { } By("verifying the cronJob is in kubernetes") - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector, ResourceVersion: cronJob.ResourceVersion, } @@ -304,7 +304,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { framework.Failf("Failed to delete cronJob: %v", err) } - options = v1.ListOptions{LabelSelector: selector} + options = metav1.ListOptions{LabelSelector: selector} cronJobs, err = cronJobClient.List(options) if err != nil { framework.Failf("Failed to list cronJobs to verify deletion: %v", err) diff --git a/test/e2e/ingress_utils.go 
b/test/e2e/ingress_utils.go index b35e0f3039e..f2bf1377dea 100644 --- a/test/e2e/ingress_utils.go +++ b/test/e2e/ingress_utils.go @@ -894,7 +894,7 @@ func (j *testJig) curlServiceNodePort(ns, name string, port int) { // by default, so retrieve its nodePort as well. func (j *testJig) getIngressNodePorts() []string { nodePorts := []string{} - defaultSvc, err := j.client.Core().Services(api.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) + defaultSvc, err := j.client.Core().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort))) @@ -948,8 +948,8 @@ func ingFromManifest(fileName string) *extensions.Ingress { } func (cont *GCEIngressController) getL7AddonUID() (string, error) { - framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap) - cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) + framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) + cm, err := cont.c.Core().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) if err != nil { return "", err } @@ -1010,7 +1010,7 @@ func (cont *NginxIngressController) init() { framework.Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel)) - pods, err := cont.c.Core().Pods(cont.ns).List(v1.ListOptions{LabelSelector: sel.String()}) + pods, err := cont.c.Core().Pods(cont.ns).List(metav1.ListOptions{LabelSelector: sel.String()}) framework.ExpectNoError(err) if len(pods.Items) == 0 { framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) diff --git a/test/e2e/job.go b/test/e2e/job.go index 2f13275808f..b01cadfb7f0 100644 --- a/test/e2e/job.go +++ b/test/e2e/job.go @@ 
-296,7 +296,7 @@ func deleteJob(c clientset.Interface, ns, name string) error { func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err diff --git a/test/e2e/kibana_logging.go b/test/e2e/kibana_logging.go index 6e477fab79c..bf1565cf1e6 100644 --- a/test/e2e/kibana_logging.go +++ b/test/e2e/kibana_logging.go @@ -22,8 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -57,7 +55,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // Check for the existence of the Kibana service. By("Checking the Kibana service exists.") - s := f.ClientSet.Core().Services(api.NamespaceSystem) + s := f.ClientSet.Core().Services(metav1.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. var err error @@ -72,8 +70,8 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // Wait for the Kibana pod(s) to enter the running state. 
By("Checking to make sure the Kibana pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue})) - options := v1.ListOptions{LabelSelector: label.String()} - pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: label.String()} + pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) @@ -94,7 +92,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { defer cancel() // Query against the root URL for Kibana. - _, err = proxyRequest.Namespace(api.NamespaceSystem). + _, err = proxyRequest.Namespace(metav1.NamespaceSystem). Context(ctx). Name("kibana-logging"). DoRaw() diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 25bb502b4b2..7d4635c2d33 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -764,7 +764,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { // Node // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] output = framework.RunKubectlOrDie("describe", "node", node.Name) @@ -1736,7 +1736,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select var err error for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} rcs, err = c.Core().ReplicationControllers(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(rcs.Items) > 0 { diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go index 069c5122906..5dd174bd3e7 100644 --- a/test/e2e/kubelet_perf.go +++ b/test/e2e/kubelet_perf.go @@ -21,8 +21,8 @@ import ( "strings" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/util/uuid" @@ -201,7 +201,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() { // Wait until image prepull pod has completed so that they wouldn't // affect the runtime cpu usage. Fail the test if prepulling cannot // finish in time. 
- if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil { + if err := framework.WaitForPodsSuccess(f.ClientSet, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil { framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout) } nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) diff --git a/test/e2e/load.go b/test/e2e/load.go index 569be07e89d..2939934728d 100644 --- a/test/e2e/load.go +++ b/test/e2e/load.go @@ -484,7 +484,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling fmt.Sprintf("scaling rc %s for the first time", config.GetName())) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()})) - options := v1.ListOptions{ + options := metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: "0", } diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go index f00dbe754f5..09b87d8f190 100644 --- a/test/e2e/mesos.go +++ b/test/e2e/mesos.go @@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Mesos", func() { nodeClient := f.ClientSet.Core().Nodes() rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) - options := v1.ListOptions{LabelSelector: rackA.String()} + options := metav1.ListOptions{LabelSelector: rackA.String()} nodes, err := nodeClient.List(options) if err != nil { framework.Failf("Failed to query for node: %v", err) @@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Mesos", func() { rack2 := labels.SelectorFromSet(map[string]string{ "k8s.mesosphere.io/attribute-rack": "2", }) - options := v1.ListOptions{LabelSelector: rack2.String()} + options := metav1.ListOptions{LabelSelector: rack2.String()} nodes, err := nodeClient.List(options) framework.ExpectNoError(err) diff --git a/test/e2e/metrics_grabber_test.go 
b/test/e2e/metrics_grabber_test.go index ab3804a2e95..60d4af088e0 100644 --- a/test/e2e/metrics_grabber_test.go +++ b/test/e2e/metrics_grabber_test.go @@ -19,7 +19,7 @@ package e2e import ( "strings" - "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/test/e2e/framework" @@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() { It("should grab all metrics from a Scheduler.", func() { By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false @@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() { It("should grab all metrics from a ControllerManager.", func() { By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go index 3d963f01588..0d0f2c9710d 100644 --- a/test/e2e/monitoring.go +++ b/test/e2e/monitoring.go @@ -24,9 +24,8 @@ import ( "time" influxdb "github.com/influxdata/influxdb/client" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" @@ -110,16 +109,16 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, // is running (which would be an error except during a rolling update). 
for _, rcLabel := range rcLabels { selector := labels.Set{"k8s-app": rcLabel}.AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} - deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: selector.String()} + deploymentList, err := c.Extensions().Deployments(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } - rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options) + rcList, err := c.Core().ReplicationControllers(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } - psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options) + psList, err := c.Apps().StatefulSets(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -130,8 +129,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, // Check all the replication controllers. for _, rc := range rcList.Items { selector := labels.Set(rc.Spec.Selector).AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: selector.String()} + podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -145,8 +144,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, // Do the same for all deployments. 
for _, rc := range deploymentList.Items { selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: selector.String()} + podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -160,8 +159,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, // And for pet sets. for _, ps := range psList.Items { selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector() - options := v1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: selector.String()} + podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -177,7 +176,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, } func expectedServicesExist(c clientset.Interface) error { - serviceList, err := c.Core().Services(api.NamespaceSystem).List(v1.ListOptions{}) + serviceList, err := c.Core().Services(metav1.NamespaceSystem).List(metav1.ListOptions{}) if err != nil { return err } @@ -196,7 +195,7 @@ func expectedServicesExist(c clientset.Interface) error { func getAllNodesInCluster(c clientset.Interface) ([]string, error) { // It should be OK to list unschedulable Nodes here. 
- nodeList, err := c.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -290,8 +289,8 @@ func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) { func printDebugInfo(c clientset.Interface) { set := labels.Set{"k8s-app": "heapster"} - options := v1.ListOptions{LabelSelector: set.AsSelector().String()} - podList, err := c.Core().Pods(api.NamespaceSystem).List(options) + options := metav1.ListOptions{LabelSelector: set.AsSelector().String()} + podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) if err != nil { framework.Logf("Error while listing pods %v", err) return diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go index bd1686c7be9..2e609d31eb0 100644 --- a/test/e2e/namespace.go +++ b/test/e2e/namespace.go @@ -61,7 +61,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 - nsList, err := f.ClientSet.Core().Namespaces().List(v1.ListOptions{}) + nsList, err := f.ClientSet.Core().Namespaces().List(metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e/network_partition.go b/test/e2e/network_partition.go index f4172d9bed8..8d4c07b5d2e 100644 --- a/test/e2e/network_partition.go +++ b/test/e2e/network_partition.go @@ -160,16 +160,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() { It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+ "AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() { By("choose a node - we will block all network traffic on this node") - var podOpts v1.ListOptions - nodeOpts := v1.ListOptions{} + var podOpts metav1.ListOptions + nodeOpts := metav1.ListOptions{} nodes, err := c.Core().Nodes().List(nodeOpts) 
Expect(err).NotTo(HaveOccurred()) framework.FilterNodes(nodes, func(node v1.Node) bool { if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) { return false } - podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := c.Core().Pods(v1.NamespaceAll).List(podOpts) + podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} + pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() { framework.Failf("No eligible node were found: %d", len(nodes.Items)) } node := nodes.Items[0] - podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} + podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } @@ -191,12 +191,12 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() { var controller cache.Controller _, controller = cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() obj, err := f.ClientSet.Core().Nodes().List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() return f.ClientSet.Core().Nodes().Watch(options) }, @@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Network 
Partition [Disruptive] [Slow]", func() { By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName @@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() { By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName @@ -449,7 +449,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() { Expect(err).NotTo(HaveOccurred()) By("choose a node with at least one pod - we will block some network traffic on this node") - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName diff --git a/test/e2e/node_problem_detector.go b/test/e2e/node_problem_detector.go index cfd8bca8052..3477dff1cec 100644 --- a/test/e2e/node_problem_detector.go +++ b/test/e2e/node_problem_detector.go @@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { name = "node-problem-detector-" + uid configName = "node-problem-detector-config-" + uid // There is no namespace for Node, event recorder will set default 
namespace for node events. - eventNamespace = v1.NamespaceDefault + eventNamespace = metav1.NamespaceDefault // this test wants extra permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches @@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { ) var source, config, tmpDir string var node *v1.Node - var eventListOptions v1.ListOptions + var eventListOptions metav1.ListOptions injectCommand := func(timestamp time.Time, log string, num int) string { var commands []string for i := 0; i < num; i++ { @@ -147,7 +147,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { ] }` By("Get a non master node to run the pod") - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node = nil for _, n := range nodes.Items { @@ -161,10 +161,10 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": node.Name, - "involvedObject.namespace": v1.NamespaceAll, + "involvedObject.namespace": metav1.NamespaceAll, "source": source, }.AsSelector().String() - eventListOptions = v1.ListOptions{FieldSelector: selector} + eventListOptions = metav1.ListOptions{FieldSelector: selector} By("Create the test log file") tmpDir = "/tmp/" + name cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile) @@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { }) // verifyEvents verifies there are num specific events generated -func verifyEvents(e coreclientset.EventInterface, options v1.ListOptions, num int, reason, message string) error { +func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { events, err := e.List(options) if err != nil { return err @@ -412,7 +412,7 @@ func verifyEvents(e coreclientset.EventInterface, 
options v1.ListOptions, num in } // verifyNoEvents verifies there is no event generated -func verifyNoEvents(e coreclientset.EventInterface, options v1.ListOptions) error { +func verifyNoEvents(e coreclientset.EventInterface, options metav1.ListOptions) error { events, err := e.List(options) if err != nil { return err diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go index 089790777ee..a8d08d89e16 100644 --- a/test/e2e/nodeoutofdisk.go +++ b/test/e2e/nodeoutofdisk.go @@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu "source": v1.DefaultSchedulerName, "reason": "FailedScheduling", }.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector} schedEvents, err := c.Core().Events(ns).List(options) framework.ExpectNoError(err) @@ -199,10 +199,10 @@ func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) // availCpu calculates the available CPU on a given node by subtracting the CPU requested by // all the pods from the total available CPU capacity on the node. func availCpu(c clientset.Interface, node *v1.Node) (int64, error) { - podClient := c.Core().Pods(v1.NamespaceAll) + podClient := c.Core().Pods(metav1.NamespaceAll) selector := fields.Set{"spec.nodeName": node.Name}.AsSelector().String() - options := v1.ListOptions{FieldSelector: selector} + options := metav1.ListOptions{FieldSelector: selector} pods, err := podClient.List(options) if err != nil { return 0, fmt.Errorf("failed to retrieve all the pods on node %s: %v", node.Name, err) diff --git a/test/e2e/opaque_resource.go b/test/e2e/opaque_resource.go index e3ead8a9cd4..8d63acd7cf7 100644 --- a/test/e2e/opaque_resource.go +++ b/test/e2e/opaque_resource.go @@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun BeforeEach(func() { if node == nil { // Priming invocation; select the first non-master node. 
- nodes, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{}) + nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, n := range nodes.Items { if !system.IsMasterNode(n.Name) { @@ -275,12 +275,12 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP _, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() ls, err := f.ClientSet.Core().Nodes().List(options) return ls, err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() w, err := f.ClientSet.Core().Nodes().Watch(options) // Signal parent goroutine that watching has begun. @@ -331,11 +331,11 @@ func observeEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Eve // Create an informer to list/watch events from the test framework namespace. 
_, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) return ls, err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options) return w, err }, diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go index 2ef8fff6c8f..b7ad48e3a39 100644 --- a/test/e2e/persistent_volumes-disruptive.go +++ b/test/e2e/persistent_volumes-disruptive.go @@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Disruptive]", func() { ) nfsServerConfig := VolumeTestConfig{ - namespace: v1.NamespaceDefault, + namespace: metav1.NamespaceDefault, prefix: "nfs", serverImage: NfsServerImage, serverPorts: []int{2049}, diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go index 23e452eb1c7..b492ece97a7 100644 --- a/test/e2e/persistent_volumes.go +++ b/test/e2e/persistent_volumes.go @@ -507,7 +507,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { // config for the nfs-server pod in the default namespace NFSconfig = VolumeTestConfig{ - namespace: v1.NamespaceDefault, + namespace: metav1.NamespaceDefault, prefix: "nfs", serverImage: NfsServerImage, serverPorts: []int{2049}, diff --git a/test/e2e/pod_gc.go b/test/e2e/pod_gc.go index c6b1c84f691..796e92d53b2 100644 --- a/test/e2e/pod_gc.go +++ b/test/e2e/pod_gc.go @@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { - pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}) + pods, err = 
f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pod %v", err) return false, nil diff --git a/test/e2e/pods.go b/test/e2e/pods.go index 26e209cc470..09f0d5c816b 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Pods Extended", func() { By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options := v1.ListOptions{LabelSelector: selector.String()} + options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pod") Expect(len(pods.Items)).To(Equal(0)) - options = v1.ListOptions{ + options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } @@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() { By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options = v1.ListOptions{LabelSelector: selector.String()} + options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pod") Expect(len(pods.Items)).To(Equal(1)) @@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() { Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) - options = v1.ListOptions{LabelSelector: selector.String()} + options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(len(pods.Items)).To(Equal(0)) diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go index 73de148acb2..6980a9ae7fe 100644 --- a/test/e2e/reboot.go +++ b/test/e2e/reboot.go @@ -65,9 +65,9 @@ var _ = 
framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { if CurrentGinkgoTestDescription().Failed { // Most of the reboot tests just make sure that addon/system pods are running, so dump // events for the kube-system namespace on failures - namespaceName := api.NamespaceSystem + namespaceName := metav1.NamespaceSystem By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) - events, err := f.ClientSet.Core().Events(namespaceName).List(v1.ListOptions{}) + events, err := f.ClientSet.Core().Events(namespaceName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, e := range events.Items { @@ -218,7 +218,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName // failed step, it will return false through result and not run the rest. func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Setup - ns := api.NamespaceSystem + ns := metav1.NamespaceSystem ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name)) defer ps.Stop() diff --git a/test/e2e/rescheduler.go b/test/e2e/rescheduler.go index 7b524d4ce13..48af919dd22 100644 --- a/test/e2e/rescheduler.go +++ b/test/e2e/rescheduler.go @@ -20,8 +20,8 @@ import ( "fmt" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -55,16 +55,16 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() { By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"})) - listOpts := v1.ListOptions{LabelSelector: label.String()} - deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts) + listOpts := metav1.ListOptions{LabelSelector: label.String()} + 
deployments, err := f.ClientSet.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts) framework.ExpectNoError(err) Expect(len(deployments.Items)).Should(Equal(1)) deployment := deployments.Items[0] replicas := uint(*(deployment.Spec.Replicas)) - err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas+1, true) - defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas, true)) + err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas+1, true) + defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas, true)) framework.ExpectNoError(err) }) diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 9e8fdd2a3c7..e15841cd074 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -24,7 +24,6 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/util/intstr" @@ -238,10 +237,10 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. 
By("waiting for system pods to successfully restart") - err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels, true) + err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels, true) Expect(err).NotTo(HaveOccurred()) By("waiting for image prepulling pods to complete") - framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout) + framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout) }) It("should be able to delete nodes", func() { diff --git a/test/e2e/resource_quota.go b/test/e2e/resource_quota.go index 4e5f07be17b..d18f14886b1 100644 --- a/test/e2e/resource_quota.go +++ b/test/e2e/resource_quota.go @@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { It("should create a ResourceQuota and capture the life of a secret.", func() { By("Discovering how many secrets are in namespace by default") - secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(v1.ListOptions{}) + secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) defaultSecrets := fmt.Sprintf("%d", len(secrets.Items)) hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1) diff --git a/test/e2e/restart.go b/test/e2e/restart.go index ce8f973fdd4..a710e68d19c 100644 --- a/test/e2e/restart.go +++ b/test/e2e/restart.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/test/e2e/framework" @@ -65,7 +64,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { // check must be identical to that call. 
framework.SkipUnlessProviderIs("gce", "gke") - ps = testutils.NewPodStore(f.ClientSet, api.NamespaceSystem, labels.Everything(), fields.Everything()) + ps = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything()) }) AfterEach(func() { @@ -90,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { for i, p := range pods { podNamesBefore[i] = p.ObjectMeta.Name } - ns := api.NamespaceSystem + ns := metav1.NamespaceSystem if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesBefore, framework.PodReadyBeforeTimeout) { framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") } diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index f69797d8bcd..2800540bdc2 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -91,7 +90,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { } } - err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels, true) + err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels, true) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeList.Items { @@ -158,7 +157,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { } framework.WaitForStableCluster(cs, masterNodes) - pods, err := cs.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}) + pods, err := cs.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, 
found := nodeToCapacityMap[pod.Spec.NodeName] @@ -506,7 +505,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Launching two pods on two distinct nodes to get two node names") CreateHostPortPods(f, "host-port", 2, true) defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port") - podList, err := cs.Core().Pods(ns).List(v1.ListOptions{}) + podList, err := cs.Core().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) Expect(len(podList.Items)).To(Equal(2)) nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName} @@ -917,7 +916,7 @@ func waitForScheduler() { // TODO: upgrade calls in PodAffinity tests when we're able to run them func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { - allPods, err := c.Core().Pods(ns).List(v1.ListOptions{}) + allPods, err := c.Core().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods) diff --git a/test/e2e/service.go b/test/e2e/service.go index d04d7fc2589..25fd6b3f5dd 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Services", func() { // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. 
It("should provide secure master service [Conformance]", func() { - _, err := cs.Core().Services(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + _, err := cs.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) }) @@ -1137,7 +1137,7 @@ var _ = framework.KubeDescribe("Services", func() { By("Remove pods immediately") label := labels.SelectorFromSet(labels.Set(t.Labels)) - options := v1.ListOptions{LabelSelector: label.String()} + options := metav1.ListOptions{LabelSelector: label.String()} podClient := t.Client.Core().Pods(f.Namespace.Name) pods, err := podClient.List(options) if err != nil { diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go index 2a40459afde..d61fc9b5989 100644 --- a/test/e2e/service_latency.go +++ b/test/e2e/service_latency.go @@ -279,11 +279,11 @@ func (eq *endpointQueries) added(e *v1.Endpoints) { func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { _, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) }, }, diff --git a/test/e2e/serviceloadbalancers.go b/test/e2e/serviceloadbalancers.go index 8d1688ac2eb..6192c34c9b0 100644 --- a/test/e2e/serviceloadbalancers.go +++ b/test/e2e/serviceloadbalancers.go @@ -20,6 +20,7 @@ import ( "fmt" "net/http" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -112,7 +113,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { 
// Find the pods of the rc we just created. labelSelector := labels.SelectorFromSet( labels.Set(map[string]string{"name": h.rcName})) - options := v1.ListOptions{LabelSelector: labelSelector.String()} + options := metav1.ListOptions{LabelSelector: labelSelector.String()} pods, err := h.client.Core().Pods(h.rcNamespace).List(options) if err != nil { return err diff --git a/test/e2e/statefulset.go b/test/e2e/statefulset.go index 3b439e44e10..538b0fd5979 100644 --- a/test/e2e/statefulset.go +++ b/test/e2e/statefulset.go @@ -242,7 +242,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { expectedPodName := ss.Name + "-1" expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.SingleObject( + watcher, err := f.ClientSet.Core().Pods(ns).Watch(metav1.SingleObject( metav1.ObjectMeta{ Name: expectedPod.Name, ResourceVersion: expectedPod.ResourceVersion, @@ -273,7 +273,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() { psLabels := klabels.Set(labels) By("Initializing watcher for selector " + psLabels.String()) - watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{ + watcher, err := f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) Expect(err).NotTo(HaveOccurred()) @@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { Expect(err).NotTo(HaveOccurred()) By("Scale down will halt with unhealthy stateful pod") - watcher, err = f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{ + watcher, err = f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) Expect(err).NotTo(HaveOccurred()) @@ -389,7 +389,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { var initialStatefulPodUID types.UID By("Waiting until 
stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) - w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(v1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) + w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) framework.ExpectNoError(err) // we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once _, err = watch.Until(statefulPodTimeout, w, func(event watch.Event) (bool, error) { @@ -472,7 +472,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { }) func dumpDebugInfo(c clientset.Interface, ns string) { - sl, _ := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()}) + sl, _ := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { desc, _ := framework.RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) @@ -849,7 +849,7 @@ func (s *statefulSetTester) update(ns, name string, update func(ss *apps.Statefu func (s *statefulSetTester) getPodList(ss *apps.StatefulSet) *v1.PodList { selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) framework.ExpectNoError(err) - podList, err := s.c.Core().Pods(ss.Namespace).List(v1.ListOptions{LabelSelector: selector.String()}) + podList, err := s.c.Core().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err) return podList } @@ -965,7 +965,7 @@ func (s *statefulSetTester) waitForStatus(ss *apps.StatefulSet, expectedReplicas func deleteAllStatefulSets(c clientset.Interface, ns string) { sst := &statefulSetTester{c: c} - ssList, err := c.Apps().StatefulSets(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()}) + ssList, err := 
c.Apps().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) framework.ExpectNoError(err) // Scale down each statefulset, then delete it completely. @@ -987,7 +987,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) { pvNames := sets.NewString() // TODO: Don't assume all pvcs in the ns belong to a statefulset pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - pvcList, err := c.Core().PersistentVolumeClaims(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()}) + pvcList, err := c.Core().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) return false, nil @@ -1007,7 +1007,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) { } pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - pvList, err := c.Core().PersistentVolumes().List(v1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := c.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) return false, nil diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go index 3d17444247d..0b5933aeaf2 100644 --- a/test/e2e/ubernetes_lite.go +++ b/test/e2e/ubernetes_lite.go @@ -127,7 +127,7 @@ func getZoneNameForNode(node v1.Node) (string, error) { // Find the names of all zones in which we have nodes in this cluster. 
func getZoneNames(c clientset.Interface) ([]string, error) { zoneNames := sets.NewString() - nodes, err := c.Core().Nodes().List(v1.ListOptions{}) + nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/test/e2e_federation/federated-daemonset.go b/test/e2e_federation/federated-daemonset.go index d7ace94792c..aded799d9b1 100644 --- a/test/e2e_federation/federated-daemonset.go +++ b/test/e2e_federation/federated-daemonset.go @@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun // deleteAllDaemonSetsOrFail deletes all DaemonSets in the given namespace name. func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string) { - DaemonSetList, err := clientset.Extensions().DaemonSets(nsName).List(v1.ListOptions{}) + DaemonSetList, err := clientset.Extensions().DaemonSets(nsName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) orphanDependents := false for _, daemonSet := range DaemonSetList.Items { diff --git a/test/e2e_federation/federated-deployment.go b/test/e2e_federation/federated-deployment.go index 2da2f041de1..2bd7318822b 100644 --- a/test/e2e_federation/federated-deployment.go +++ b/test/e2e_federation/federated-deployment.go @@ -137,7 +137,7 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu // deleteAllDeploymentsOrFail deletes all deployments in the given namespace name. 
func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string) { - deploymentList, err := clientset.Extensions().Deployments(nsName).List(v1.ListOptions{}) + deploymentList, err := clientset.Extensions().Deployments(nsName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) orphanDependents := false for _, deployment := range deploymentList.Items { diff --git a/test/e2e_federation/federated-ingress.go b/test/e2e_federation/federated-ingress.go index 171d8a05fd9..00e39303bbf 100644 --- a/test/e2e_federation/federated-ingress.go +++ b/test/e2e_federation/federated-ingress.go @@ -205,7 +205,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func( // Deletes all Ingresses in the given namespace name. func deleteAllIngressesOrFail(clientset *fedclientset.Clientset, nsName string) { orphanDependents := false - err := clientset.Extensions().Ingresses(nsName).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, v1.ListOptions{}) + err := clientset.Extensions().Ingresses(nsName).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error in deleting ingresses in namespace: %s", nsName)) } diff --git a/test/e2e_federation/federated-namespace.go b/test/e2e_federation/federated-namespace.go index b082456042f..7b72f8c8e2b 100644 --- a/test/e2e_federation/federated-namespace.go +++ b/test/e2e_federation/federated-namespace.go @@ -197,8 +197,8 @@ func createNamespace(nsClient clientset.NamespaceInterface) string { return ns.Name } -func deleteAllTestNamespaces(orphanDependents *bool, lister func(api_v1.ListOptions) (*api_v1.NamespaceList, error), deleter func(string, *api_v1.DeleteOptions) error) { - list, err := lister(api_v1.ListOptions{}) +func deleteAllTestNamespaces(orphanDependents *bool, lister func(metav1.ListOptions) (*api_v1.NamespaceList, error), deleter func(string, *api_v1.DeleteOptions) error) { + list, 
err := lister(metav1.ListOptions{}) if err != nil { framework.Failf("Failed to get all namespaes: %v", err) return @@ -215,9 +215,9 @@ func deleteAllTestNamespaces(orphanDependents *bool, lister func(api_v1.ListOpti waitForNoTestNamespaces(lister) } -func waitForNoTestNamespaces(lister func(api_v1.ListOptions) (*api_v1.NamespaceList, error)) { +func waitForNoTestNamespaces(lister func(metav1.ListOptions) (*api_v1.NamespaceList, error)) { err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { - list, err := lister(api_v1.ListOptions{}) + list, err := lister(metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e_federation/federated-replicaset.go b/test/e2e_federation/federated-replicaset.go index 4544ab7221e..31882a14702 100644 --- a/test/e2e_federation/federated-replicaset.go +++ b/test/e2e_federation/federated-replicaset.go @@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu // deleteAllReplicaSetsOrFail deletes all replicasets in the given namespace name. func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string) { - replicasetList, err := clientset.Extensions().ReplicaSets(nsName).List(v1.ListOptions{}) + replicasetList, err := clientset.Extensions().ReplicaSets(nsName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) orphanDependents := false for _, replicaset := range replicasetList.Items { diff --git a/test/e2e_federation/federated-secret.go b/test/e2e_federation/federated-secret.go index 548e62a2618..5bc42c19b6c 100644 --- a/test/e2e_federation/federated-secret.go +++ b/test/e2e_federation/federated-secret.go @@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() // deleteAllSecretsOrFail deletes all secrets in the given namespace name. 
func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) { - SecretList, err := clientset.Core().Secrets(nsName).List(v1.ListOptions{}) + SecretList, err := clientset.Core().Secrets(nsName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) orphanDependents := false for _, Secret := range SecretList.Items { diff --git a/test/e2e_federation/federation-apiserver.go b/test/e2e_federation/federation-apiserver.go index 9b0702e48a1..00578160521 100644 --- a/test/e2e_federation/federation-apiserver.go +++ b/test/e2e_federation/federation-apiserver.go @@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func // Delete registered clusters. // This is if a test failed, it should not affect other tests. - clusterList, err := f.FederationClientset.Federation().Clusters().List(v1.ListOptions{}) + clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, cluster := range clusterList.Items { err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{}) @@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func // There should not be any remaining cluster. framework.Logf("Verifying that zero clusters remain") - clusterList, err := f.FederationClientset.Federation().Clusters().List(v1.ListOptions{}) + clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) if len(clusterList.Items) != 0 { framework.Failf("there should not have been any remaining clusters. 
Found: %+v", clusterList) diff --git a/test/e2e_federation/federation-event.go b/test/e2e_federation/federation-event.go index 9a7ada6450e..82bfe15c8c8 100644 --- a/test/e2e_federation/federation-event.go +++ b/test/e2e_federation/federation-event.go @@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func() nsName := f.FederationNamespace.Name // Delete registered events. - eventList, err := f.FederationClientset.Core().Events(nsName).List(v1.ListOptions{}) + eventList, err := f.FederationClientset.Core().Events(nsName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, event := range eventList.Items { err := f.FederationClientset.Core().Events(nsName).Delete(event.Name, &v1.DeleteOptions{}) diff --git a/test/e2e_federation/federation-util.go b/test/e2e_federation/federation-util.go index 4e405d09e42..bcbfe28c826 100644 --- a/test/e2e_federation/federation-util.go +++ b/test/e2e_federation/federation-util.go @@ -106,7 +106,7 @@ func waitForAllClustersReady(f *fedframework.Framework, clusterCount int) *feder var clusterList *federationapi.ClusterList if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) { var err error - clusterList, err = f.FederationClientset.Federation().Clusters().List(v1.ListOptions{}) + clusterList, err = f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -178,7 +178,7 @@ func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework) } // Delete the registered clusters in the federation API server. 
- clusterList, err := f.FederationClientset.Federation().Clusters().List(v1.ListOptions{}) + clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{}) framework.ExpectNoError(err, "Error listing clusters") for _, cluster := range clusterList.Items { err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{}) diff --git a/test/e2e_federation/framework/framework.go b/test/e2e_federation/framework/framework.go index bf1633bca88..0fdc9db95d3 100644 --- a/test/e2e_federation/framework/framework.go +++ b/test/e2e_federation/framework/framework.go @@ -126,7 +126,7 @@ func (f *Framework) FederationAfterEach() { framework.Logf("Warning: framework is marked federated, but has no federation 1.5 clientset") return } - if err := f.FederationClientset.Federation().Clusters().DeleteCollection(nil, v1.ListOptions{}); err != nil { + if err := f.FederationClientset.Federation().Clusters().DeleteCollection(nil, metav1.ListOptions{}); err != nil { framework.Logf("Error: failed to delete Clusters: %+v", err) } }() @@ -134,7 +134,7 @@ func (f *Framework) FederationAfterEach() { // Print events if the test failed. if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure { // Dump federation events in federation namespace. 
- framework.DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { + framework.DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { return f.FederationClientset.Core().Events(ns).List(opts) }, f.FederationNamespace.Name) // Print logs of federation control plane pods (federation-apiserver and federation-controller-manager) diff --git a/test/e2e_federation/framework/util.go b/test/e2e_federation/framework/util.go index b292e486dd1..55f583b62aa 100644 --- a/test/e2e_federation/framework/util.go +++ b/test/e2e_federation/framework/util.go @@ -31,7 +31,6 @@ import ( federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" @@ -58,7 +57,7 @@ func SkipUnlessFederated(c clientset.Interface) { // It tests the readiness by sending a GET request and expecting a non error response. func WaitForFederationApiserverReady(c *federation_clientset.Clientset) error { return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) { - _, err := c.Federation().Clusters().List(v1.ListOptions{}) + _, err := c.Federation().Clusters().List(metav1.ListOptions{}) if err != nil { return false, nil } diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 4d167baf26d..792f971b067 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -147,7 +147,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout)) } else { // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor". 
- w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name})) + w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name})) framework.ExpectNoError(err) _, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) { switch e.Type { diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index 4ce9f07e024..b1f3968586f 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -492,12 +492,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m _, controller := cache.NewInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() obj, err := f.ClientSet.Core().Pods(ns).List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() return f.ClientSet.Core().Pods(ns).Watch(options) }, diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index c5296a9d84c..596dd3e9bbe 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" commontest "k8s.io/kubernetes/test/e2e/common" @@ -252,7 +253,7 @@ func updateTestContext() error { // getNode gets node object from the apiserver. 
func getNode(c *clientset.Clientset) (*v1.Node, error) { - nodes, err := c.Nodes().List(v1.ListOptions{}) + nodes, err := c.Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.") if nodes == nil { return nil, fmt.Errorf("the node list is nil.") diff --git a/test/e2e_node/inode_eviction_test.go b/test/e2e_node/inode_eviction_test.go index 43421204c3e..e4581159df6 100644 --- a/test/e2e_node/inode_eviction_test.go +++ b/test/e2e_node/inode_eviction_test.go @@ -161,7 +161,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs Eventually(func() error { // Gather current information - updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}) + updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) updatedPods := updatedPodList.Items for _, p := range updatedPods { framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase) @@ -288,7 +288,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs // Returns TRUE if the node has disk pressure due to inodes exists on the node, FALSE otherwise func hasInodePressure(f *framework.Framework, testCondition string) (bool, error) { - nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") if len(nodeList.Items) != 1 { return false, fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items) diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 69f61c45c1b..71d13e427d4 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // Wait for the memory pressure condition to disappear from the node status before continuing. 
By("waiting for the memory pressure condition on the node to disappear before ending the test.") Eventually(func() error { - nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("tried to get node list but got error: %v", err) } @@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // see the eviction manager reporting a pressure condition for a while without the besteffort failing, // and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we // will have more reason to believe the phase is out of date. - nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) if err != nil { glog.Errorf("tried to get node list but got error: %v", err) } diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 13dea0ac5e9..572634cc7aa 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -21,6 +21,7 @@ package e2e_node import ( "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" "fmt" @@ -35,7 +36,7 @@ import ( // If the timeout is hit, it returns the list of currently running pods. 
func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { - podList, err := f.PodClient().List(v1.ListOptions{}) + podList, err := f.PodClient().List(metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pods on node: %v", err) continue diff --git a/test/images/clusterapi-tester/main.go b/test/images/clusterapi-tester/main.go index 0b25680c306..1e24ec6ffbd 100644 --- a/test/images/clusterapi-tester/main.go +++ b/test/images/clusterapi-tester/main.go @@ -24,10 +24,8 @@ import ( "fmt" "net/http" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" ) @@ -41,7 +39,7 @@ func main() { if err != nil { log.Fatalf("Failed to create client: %v", err) } - listAll := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()} + listAll := metav1.ListOptions{} nodes, err := kubeClient.Core().Nodes().List(listAll) if err != nil { log.Fatalf("Failed to list nodes: %v", err) @@ -50,7 +48,7 @@ func main() { for _, node := range nodes.Items { log.Printf("\t%v", node.Name) } - services, err := kubeClient.Core().Services(api.NamespaceDefault).List(listAll) + services, err := kubeClient.Core().Services(metav1.NamespaceDefault).List(listAll) if err != nil { log.Fatalf("Failed to list services: %v", err) } diff --git a/test/integration/auth/auth_test.go b/test/integration/auth/auth_test.go index 9e81f299b8b..fd9d05b11fb 100644 --- a/test/integration/auth/auth_test.go +++ b/test/integration/auth/auth_test.go @@ -36,6 +36,7 @@ import ( "testing" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/group" 
"k8s.io/apiserver/pkg/authentication/request/bearertoken" @@ -968,10 +969,10 @@ func TestNamespaceAuthorization(t *testing.T) { {"GET", path("pods", "foo", "a"), "bar", "", integration.Code403}, {"DELETE", timeoutPath("pods", "foo", "a"), "bar", "", integration.Code403}, - {"POST", timeoutPath("pods", api.NamespaceDefault, ""), "", aPod, integration.Code403}, + {"POST", timeoutPath("pods", metav1.NamespaceDefault, ""), "", aPod, integration.Code403}, {"GET", path("pods", "", ""), "", "", integration.Code403}, - {"GET", path("pods", api.NamespaceDefault, "a"), "", "", integration.Code403}, - {"DELETE", timeoutPath("pods", api.NamespaceDefault, "a"), "", "", integration.Code403}, + {"GET", path("pods", metav1.NamespaceDefault, "a"), "", "", integration.Code403}, + {"DELETE", timeoutPath("pods", metav1.NamespaceDefault, "a"), "", "", integration.Code403}, } for _, r := range requests { @@ -1139,7 +1140,7 @@ func TestReadOnlyAuthorization(t *testing.T) { }{ {"POST", path("pods", ns.Name, ""), aPod, integration.Code403}, {"GET", path("pods", ns.Name, ""), "", integration.Code200}, - {"GET", path("pods", api.NamespaceDefault, "a"), "", integration.Code404}, + {"GET", path("pods", metav1.NamespaceDefault, "a"), "", integration.Code404}, } for _, r := range requests { diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index f4bc322dbae..0664afdb750 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -508,7 +508,7 @@ func TestBootstrapping(t *testing.T) { clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}}) - watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"}) + watcher, err := clientset.Rbac().ClusterRoles().Watch(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("unexpected error: %v", err) 
} @@ -522,7 +522,7 @@ func TestBootstrapping(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{}) + clusterRoles, err := clientset.Rbac().ClusterRoles().List(metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/client/client_test.go b/test/integration/client/client_test.go index f26e59b736d..89d1013d7fd 100644 --- a/test/integration/client/client_test.go +++ b/test/integration/client/client_test.go @@ -61,7 +61,7 @@ func TestClient(t *testing.T) { t.Errorf("expected %#v, got %#v", e, a) } - pods, err := client.Core().Pods(ns.Name).List(v1.ListOptions{}) + pods, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -100,7 +100,7 @@ func TestClient(t *testing.T) { } // pod is shown, but not scheduled - pods, err = client.Core().Pods(ns.Name).List(v1.ListOptions{}) + pods, err = client.Core().Pods(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -588,7 +588,7 @@ func TestMultiWatch(t *testing.T) { t.Fatalf("Couldn't make %v: %v", name, err) } go func(name, rv string) { - options := v1.ListOptions{ + options := metav1.ListOptions{ LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(), ResourceVersion: rv, } @@ -764,7 +764,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err) } - podList, err := c.Core().Pods(namespace).List(v1.ListOptions{}) + podList, err := c.Core().Pods(namespace).List(metav1.ListOptions{}) if err != nil { t.Errorf("Failed listing pods: %v", err) } diff --git a/test/integration/client/dynamic_client_test.go b/test/integration/client/dynamic_client_test.go index 88f58a7b07c..c48b1e7ea13 100644 --- a/test/integration/client/dynamic_client_test.go +++ 
b/test/integration/client/dynamic_client_test.go @@ -93,7 +93,7 @@ func TestDynamicClient(t *testing.T) { } // check dynamic list - obj, err := dynamicClient.Resource(&resource, ns.Name).List(&v1.ListOptions{}) + obj, err := dynamicClient.Resource(&resource, ns.Name).List(&metav1.ListOptions{}) unstructuredList, ok := obj.(*unstructured.UnstructuredList) if !ok { t.Fatalf("expected *unstructured.UnstructuredList, got %#v", obj) @@ -136,7 +136,7 @@ func TestDynamicClient(t *testing.T) { t.Fatalf("unexpected error when deleting pod: %v", err) } - list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{}) + list, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error when listing pods: %v", err) } diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 1f2a09b1cd9..f803c80e44c 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -267,7 +267,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv if masterConfig.EnableCoreControllers { // TODO Once /healthz is updated for posthooks, we'll wait for good health coreClient := coreclient.NewForConfigOrDie(&cfg) - svcWatch, err := coreClient.Services(v1.NamespaceDefault).Watch(v1.ListOptions{}) + svcWatch, err := coreClient.Services(metav1.NamespaceDefault).Watch(metav1.ListOptions{}) if err != nil { glog.Fatal(err) } diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 6002ac86ceb..f1df8768cf5 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -172,7 +172,7 @@ func TestCascadingDeletion(t *testing.T) { t.Fatalf("Failed to create replication controller: %v", err) } - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) 
if err != nil { t.Fatalf("Failed to list replication controllers: %v", err) } @@ -205,7 +205,7 @@ func TestCascadingDeletion(t *testing.T) { } // set up watch - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } @@ -257,7 +257,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) { } // set up watch - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } @@ -313,7 +313,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) { rcClient := clientSet.Core().ReplicationControllers(namespace) podClient := clientSet.Core().Pods(namespace) - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -322,7 +322,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa ret = false t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items)) } - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list replication controllers: %v", err) } @@ -377,7 +377,7 @@ func TestStressingCascadingDeletion(t *testing.T) { // verify the remaining pods all have "orphan" in their names. 
podClient := clientSet.Core().Pods(ns.Name) - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -454,7 +454,7 @@ func TestOrphaning(t *testing.T) { } // verify the toBeDeleteRC is deleted if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) { - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return false, err } @@ -468,7 +468,7 @@ func TestOrphaning(t *testing.T) { } // verify pods don't have the ownerPod as an owner anymore - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } diff --git a/test/integration/master/master_benchmark_test.go b/test/integration/master/master_benchmark_test.go index 243e671bce7..56b290dcc37 100644 --- a/test/integration/master/master_benchmark_test.go +++ b/test/integration/master/master_benchmark_test.go @@ -26,6 +26,7 @@ import ( "time" "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/api" @@ -137,7 +138,7 @@ func BenchmarkPodList(b *testing.B) { defer func() { glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now)) }() - if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{ + if pods, err := m.ClientSet.Core().Pods(ns.Name).List(metav1.ListOptions{ LabelSelector: labels.Everything(), FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host), }); err != nil { @@ -180,7 +181,7 @@ func BenchmarkPodListEtcd(b *testing.B) { defer func() { glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now)) }() - pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{ + pods, err := m.ClientSet.Core().Pods(ns.Name).List(metav1.ListOptions{ LabelSelector: labels.Everything(), 
FieldSelector: fields.Everything(), }) diff --git a/test/integration/master/master_test.go b/test/integration/master/master_test.go index 33291ff54da..63aded0e4cb 100644 --- a/test/integration/master/master_test.go +++ b/test/integration/master/master_test.go @@ -162,9 +162,9 @@ func TestAutoscalingGroupBackwardCompatibility(t *testing.T) { expectedStatusCodes map[int]bool expectedVersion string }{ - {"POST", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), hpaV1, integration.Code201, ""}, - {"GET", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()}, - {"GET", extensionsPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()}, + {"POST", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), hpaV1, integration.Code201, ""}, + {"GET", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()}, + {"GET", extensionsPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()}, } for _, r := range requests { @@ -281,7 +281,7 @@ func TestMasterService(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}}) err := wait.Poll(time.Second, time.Minute, func() (bool, error) { - svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{}) + svcList, err := client.Core().Services(metav1.NamespaceDefault).List(metav1.ListOptions{}) if err != nil { t.Errorf("unexpected error: %v", err) return false, nil @@ -294,7 +294,7 @@ func TestMasterService(t *testing.T) { } } if found { - ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", 
metav1.GetOptions{}) + ep, err := client.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) if err != nil { return false, nil } @@ -338,7 +338,7 @@ func TestServiceAlloc(t *testing.T) { // Wait until the default "kubernetes" service is created. if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + _, err := client.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { return false, err } @@ -349,18 +349,18 @@ func TestServiceAlloc(t *testing.T) { // make 5 more services to take up all IPs for i := 0; i < 5; i++ { - if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(i)); err != nil { + if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(i)); err != nil { t.Error(err) } } // Make another service. It will fail because we're out of cluster IPs - if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil { + if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil { if !strings.Contains(err.Error(), "range is full") { t.Errorf("unexpected error text: %v", err) } } else { - svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{}) + svcs, err := client.Core().Services(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected success, and error getting the services: %v", err) } @@ -372,12 +372,12 @@ func TestServiceAlloc(t *testing.T) { } // Delete the first service. - if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil { + if err := client.Core().Services(metav1.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil { t.Fatalf("got unexpected error: %v", err) } // This time creating the second service should work. 
- if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil { + if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil { t.Fatalf("got unexpected error: %v", err) } } @@ -420,7 +420,7 @@ func TestUpdateNodeObjects(t *testing.T) { for k := 0; k < listers; k++ { go func(lister int) { for i := 0; i < iterations; i++ { - _, err := c.Nodes().List(v1.ListOptions{}) + _, err := c.Nodes().List(metav1.ListOptions{}) if err != nil { fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err) break @@ -432,7 +432,7 @@ func TestUpdateNodeObjects(t *testing.T) { for k := 0; k < watchers; k++ { go func(lister int) { - w, err := c.Nodes().Watch(v1.ListOptions{}) + w, err := c.Nodes().Watch(metav1.ListOptions{}) if err != nil { fmt.Printf("[watch:%d] error: %v", k, err) return @@ -462,14 +462,14 @@ func TestUpdateNodeObjects(t *testing.T) { fmt.Printf("[%d] iteration %d ...\n", node, i) } if i%20 == 0 { - _, err := c.Nodes().List(v1.ListOptions{}) + _, err := c.Nodes().List(metav1.ListOptions{}) if err != nil { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break } } - r, err := c.Nodes().List(v1.ListOptions{ + r, err := c.Nodes().List(metav1.ListOptions{ FieldSelector: fmt.Sprintf("metadata.name=node-%d", node), ResourceVersion: "0", }) diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index b5703493fdb..bb7b76f5095 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -25,6 +25,7 @@ import ( "net/http/httptest" "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" @@ -109,7 +110,7 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. 
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil { + if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index d4be5a9c59b..c5b58ce1db2 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -131,7 +131,7 @@ func TestQuota(t *testing.T) { } func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) { - w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) + w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -189,7 +189,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { }, } - w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) + w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -216,7 +216,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { return false, nil }) if err != nil { - pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) + pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) t.Fatalf("unexpected error: %v, ended with %v pods", err, 
len(pods.Items)) } } diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 812f9cd3979..987d49becdf 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -106,7 +106,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod { func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) { rsClient := clientSet.Extensions().ReplicaSets(namespace) podClient := clientSet.Core().Pods(namespace) - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -115,7 +115,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa ret = false t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items)) } - rss, err := rsClient.List(v1.ListOptions{}) + rss, err := rsClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list replica sets: %v", err) } diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 170c69a156a..c5aca7927aa 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -103,7 +103,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod { func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) { rcClient := clientSet.Core().ReplicationControllers(namespace) podClient := clientSet.Core().Pods(namespace) - pods, err := podClient.List(v1.ListOptions{}) + pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -112,7 +112,7 @@ func verifyRemainingObjects(t 
*testing.T, clientSet clientset.Interface, namespa ret = false t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items)) } - rcs, err := rcClient.List(v1.ListOptions{}) + rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list replication controllers: %v", err) } diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 72dd0bdd301..0fdeedc59b0 100644 --- a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -256,7 +256,7 @@ func TestSchedulerExtender(t *testing.T) { func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{}) + defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index be4399a7709..69440a86a0b 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -123,7 +123,7 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{}) + defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, @@ -326,7 +326,7 @@ func TestMultiScheduler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). 
- defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{}) + defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() @@ -492,7 +492,7 @@ func TestAllocatable(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{}) + defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index a6b138e74e0..07dff8ab649 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -172,7 +172,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name) err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { // Get all secrets in the namespace - secrets, err := c.Core().Secrets(ns).List(v1.ListOptions{}) + secrets, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) // Retrieval errors should fail if err != nil { return false, err @@ -519,11 +519,11 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string readOps := []testOperation{ func() error { - _, err := c.Core().Secrets(ns).List(v1.ListOptions{}) + _, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) return err }, func() error { - _, err := c.Core().Pods(ns).List(v1.ListOptions{}) + _, err := 
c.Core().Pods(ns).List(metav1.ListOptions{}) return err }, } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index c8b52b43ea1..5451fcc8521 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -116,7 +116,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go ctrl.Run(stopCh) @@ -170,7 +170,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go ctrl.Run(stopCh) @@ -229,7 +229,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go ctrl.Run(stopCh) @@ -300,7 +300,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go controller.Run(stopCh) @@ -380,7 +380,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go controller.Run(stopCh) @@ -479,7 +479,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go controller.Run(stopCh) @@ -568,7 +568,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) controllerStopCh := make(chan struct{}) go binder.Run(controllerStopCh) @@ -637,7 +637,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Modify PVC i := rand.Intn(objCount) name := "pvc-" + strconv.Itoa(i) - pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name, metav1.GetOptions{}) + pvc, err := testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Get(name, metav1.GetOptions{}) if err != nil { // Silently ignore error, the PVC may have be already // deleted or not exists yet. @@ -649,7 +649,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } else { pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int()) } - _, err = testClient.PersistentVolumeClaims(v1.NamespaceDefault).Update(pvc) + _, err = testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Update(pvc) if err != nil { // Silently ignore error, the PVC may have been updated by // the controller. @@ -856,8 +856,8 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes and StorageClasses). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) - defer testClient.Storage().StorageClasses().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.Storage().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) storageClass := storage.StorageClass{ TypeMeta: metav1.TypeMeta{ @@ -901,7 +901,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound") // check that we have enough bound PVs - pvList, err := testClient.PersistentVolumes().List(v1.ListOptions{}) + pvList, err := testClient.PersistentVolumes().List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list volumes: %s", err) } @@ -924,7 +924,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // Wait for the PVs to get deleted by listing remaining volumes // (delete events were unreliable) for { - volumes, err := testClient.PersistentVolumes().List(v1.ListOptions{}) + volumes, err := testClient.PersistentVolumes().List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list volumes: %v", err) } @@ -953,7 +953,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{}) + defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) go controller.Run(stopCh) @@ -1128,11 +1128,11 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio EnableDynamicProvisioning: true, }) - watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{}) + watchPV, err := testClient.PersistentVolumes().Watch(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to watch PersistentVolumes: %v", err) } - watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{}) + watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err) } diff --git a/test/soak/cauldron/cauldron.go b/test/soak/cauldron/cauldron.go index 2b4f711a4ed..cfe73f329d3 100644 --- a/test/soak/cauldron/cauldron.go +++ b/test/soak/cauldron/cauldron.go @@ -73,7 +73,7 @@ func main() { var nodes *api.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { - nodes, err = client.Core().Nodes().List(api.ListOptions{}) + nodes, err = client.Core().Nodes().List(metav1.ListOptions{}) if err == nil { break } diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 0b0f630876f..90ca2fecd89 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -92,7 +92,7 @@ func main() { var nodes *v1.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { - nodes, err = client.Nodes().List(v1.ListOptions{}) + nodes, err = client.Nodes().List(metav1.ListOptions{}) if err == nil { break } diff --git a/test/utils/pod_store.go b/test/utils/pod_store.go index d0bd2fa2f2d..76763181c5d 100644 --- a/test/utils/pod_store.go +++ 
b/test/utils/pod_store.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -35,13 +36,13 @@ type PodStore struct { func NewPodStore(c clientset.Interface, namespace string, label labels.Selector, field fields.Selector) *PodStore { lw := &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() obj, err := c.Core().Pods(namespace).List(options) return runtime.Object(obj), err }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() return c.Core().Pods(namespace).Watch(options) diff --git a/test/utils/runners.go b/test/utils/runners.go index 7c6ef55cad6..45ed3c629c1 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -689,8 +689,8 @@ func (config *RCConfig) start() error { if oldRunning != config.Replicas { // List only pods from a given replication controller. 
- options := v1.ListOptions{LabelSelector: label.String()} - if pods, err := config.Client.Core().Pods(v1.NamespaceAll).List(options); err == nil { + options := metav1.ListOptions{LabelSelector: label.String()} + if pods, err := config.Client.Core().Pods(metav1.NamespaceAll).List(options); err == nil { for _, pod := range pods.Items { config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp) @@ -1116,7 +1116,7 @@ func (config *DaemonConfig) Run() error { var nodes *v1.NodeList for i := 0; i < retries; i++ { // Wait for all daemons to be running - nodes, err = config.Client.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"}) + nodes, err = config.Client.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err == nil { break } else if i+1 == retries {