diff --git a/cmd/kubelet/app/auth.go b/cmd/kubelet/app/auth.go index 53100c9aa15..aa2a496e8da 100644 --- a/cmd/kubelet/app/auth.go +++ b/cmd/kubelet/app/auth.go @@ -26,9 +26,9 @@ import ( "k8s.io/kubernetes/pkg/auth/authenticator/bearertoken" "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/auth/group" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - authenticationclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" - authorizationclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" + authenticationclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/authentication/v1beta1" + authorizationclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/authorization/v1beta1" alwaysallowauthorizer "k8s.io/kubernetes/pkg/genericapiserver/authorizer" "k8s.io/kubernetes/pkg/kubelet/server" "k8s.io/kubernetes/pkg/types" @@ -40,7 +40,7 @@ import ( webhooksar "k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook" ) -func buildAuth(nodeName types.NodeName, client internalclientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) { +func buildAuth(nodeName types.NodeName, client clientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) { // Get clients, if provided var ( tokenClient authenticationclient.TokenReviewInterface diff --git a/cmd/kubelet/app/bootstrap.go b/cmd/kubelet/app/bootstrap.go index e1d1593e207..b0569a03178 100644 --- a/cmd/kubelet/app/bootstrap.go +++ b/cmd/kubelet/app/bootstrap.go @@ -25,7 +25,7 @@ import ( "github.com/golang/glog" - unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" + unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 86f2a32ca2a..df98b4bb871 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -38,12 +38,13 @@ import ( "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/client/chaosclient" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/restclient" clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" @@ -170,7 +171,7 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD return "", err } - configmap, err := func() (*api.ConfigMap, error) { + configmap, err := func() (*v1.ConfigMap, error) { var nodename types.NodeName hostname := nodeutil.GetHostname(s.HostnameOverride) @@ -186,14 +187,14 @@ func 
getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD return nil, err } // look for kubelet- configmap from "kube-system" - configmap, err := kubeClient.CoreClient.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename)) + configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename)) if err != nil { return nil, err } return configmap, nil } // No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname - configmap, err := kubeClient.CoreClient.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname)) + configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname)) if err != nil { return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err) } @@ -660,11 +661,11 @@ func RunKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet } eventBroadcaster := record.NewBroadcaster() - kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: string(nodeName)}) + kubeDeps.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "kubelet", Host: string(nodeName)}) eventBroadcaster.StartLogging(glog.V(3).Infof) if kubeDeps.EventClient != nil { glog.V(4).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")}) } else { glog.Warning("No api server defined - no events will be sent to API server.") } diff --git a/pkg/kubelet/active_deadline.go b/pkg/kubelet/active_deadline.go index 154bbd3bad1..9356f0e31ca 100644 --- a/pkg/kubelet/active_deadline.go +++ b/pkg/kubelet/active_deadline.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" @@ -61,22 +61,22 @@ func newActiveDeadlineHandler( } // ShouldSync returns true if the pod is past its active deadline. -func (m *activeDeadlineHandler) ShouldSync(pod *api.Pod) bool { +func (m *activeDeadlineHandler) ShouldSync(pod *v1.Pod) bool { return m.pastActiveDeadline(pod) } // ShouldEvict returns true if the pod is past its active deadline. // It dispatches an event that the pod should be evicted if it is past its deadline. 
-func (m *activeDeadlineHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse { +func (m *activeDeadlineHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse { if !m.pastActiveDeadline(pod) { return lifecycle.ShouldEvictResponse{Evict: false} } - m.recorder.Eventf(pod, api.EventTypeNormal, reason, message) + m.recorder.Eventf(pod, v1.EventTypeNormal, reason, message) return lifecycle.ShouldEvictResponse{Evict: true, Reason: reason, Message: message} } // pastActiveDeadline returns true if the pod has been active for more than its ActiveDeadlineSeconds -func (m *activeDeadlineHandler) pastActiveDeadline(pod *api.Pod) bool { +func (m *activeDeadlineHandler) pastActiveDeadline(pod *v1.Pod) bool { // no active deadline was specified if pod.Spec.ActiveDeadlineSeconds == nil { return false diff --git a/pkg/kubelet/active_deadline_test.go b/pkg/kubelet/active_deadline_test.go index 52ebeafdcb5..c3842f75072 100644 --- a/pkg/kubelet/active_deadline_test.go +++ b/pkg/kubelet/active_deadline_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/clock" @@ -29,17 +29,17 @@ import ( // mockPodStatusProvider returns the status on the specified pod type mockPodStatusProvider struct { - pods []*api.Pod + pods []*v1.Pod } // GetPodStatus returns the status on the associated pod with matching uid (if found) -func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (api.PodStatus, bool) { +func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) { for _, pod := range m.pods { if pod.UID == uid { return pod.Status, true } } - return api.PodStatus{}, false + return v1.PodStatus{}, false } // TestActiveDeadlineHandler verifies the active deadline handler functions as expected. @@ -71,7 +71,7 @@ func TestActiveDeadlineHandler(t *testing.T) { pods[2].Spec.ActiveDeadlineSeconds = nil testCases := []struct { - pod *api.Pod + pod *v1.Pod expected bool }{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}} diff --git a/pkg/kubelet/api/v1alpha1/runtime/api.pb.go b/pkg/kubelet/api/v1alpha1/runtime/api.pb.go index ec3d90b5b9b..0c43321358d 100644 --- a/pkg/kubelet/api/v1alpha1/runtime/api.pb.go +++ b/pkg/kubelet/api/v1alpha1/runtime/api.pb.go @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: api.proto +// source: v1.proto // DO NOT EDIT! /* Package runtime is a generated protocol buffer package. It is generated from these files: - api.proto + v1.proto It has these top-level messages: VersionRequest @@ -1028,7 +1028,7 @@ type PodSandboxFilter struct { // State of the sandbox. State *PodSandboxState `protobuf:"varint,2,opt,name=state,enum=runtime.PodSandboxState" json:"state,omitempty"` // LabelSelector to select matches. - // Only api.MatchLabels is supported for now and the requirements + // Only v1.MatchLabels is supported for now and the requirements // are ANDed. MatchExpressions is not supported yet. LabelSelector map[string]string `protobuf:"bytes,3,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` @@ -1847,7 +1847,7 @@ type ContainerFilter struct { // ID of the PodSandbox. 
PodSandboxId *string `protobuf:"bytes,3,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` // LabelSelector to select matches. - // Only api.MatchLabels is supported for now and the requirements + // Only v1.MatchLabels is supported for now and the requirements // are ANDed. MatchExpressions is not supported yet. LabelSelector map[string]string `protobuf:"bytes,4,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` diff --git a/pkg/kubelet/cadvisor/util.go b/pkg/kubelet/cadvisor/util.go index 5fa528a5853..2ab3be4aa9d 100644 --- a/pkg/kubelet/cadvisor/util.go +++ b/pkg/kubelet/cadvisor/util.go @@ -18,16 +18,16 @@ package cadvisor import ( cadvisorApi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" ) -func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList { - c := api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity( +func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) v1.ResourceList { + c := v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity( int64(info.NumCores*1000), resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity( + v1.ResourceMemory: *resource.NewQuantity( int64(info.MemoryCapacity), resource.BinarySI), } diff --git a/pkg/kubelet/client/kubelet_client.go b/pkg/kubelet/client/kubelet_client.go index aa9644b6c96..0e593ccc9aa 100644 --- a/pkg/kubelet/client/kubelet_client.go +++ b/pkg/kubelet/client/kubelet_client.go @@ -23,6 +23,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/transport" "k8s.io/kubernetes/pkg/types" @@ -102,13 +103,13 @@ func (c *KubeletClientConfig) transportConfig() *transport.Config { // NodeGetter defines an interface for looking up a node by name type NodeGetter interface { - Get(name string) (*api.Node, error) + Get(name string) (*v1.Node, error) } // NodeGetterFunc allows implementing NodeGetter with a function -type NodeGetterFunc func(name string) (*api.Node, error) +type NodeGetterFunc func(name string) (*v1.Node, error) -func (f NodeGetterFunc) Get(name string) (*api.Node, error) { +func (f NodeGetterFunc) Get(name string) (*v1.Node, error) { return f(name) } @@ -123,7 +124,7 @@ type NodeConnectionInfoGetter struct { // transport is the transport to use to send a request to all kubelets transport http.RoundTripper // preferredAddressTypes specifies the preferred order to use to find a node address - preferredAddressTypes []api.NodeAddressType + preferredAddressTypes []v1.NodeAddressType } func NewNodeConnectionInfoGetter(nodes NodeGetter, config KubeletClientConfig) (ConnectionInfoGetter, error) { @@ -137,9 +138,9 @@ func NewNodeConnectionInfoGetter(nodes NodeGetter, config KubeletClientConfig) ( return nil, err } - types := []api.NodeAddressType{} + types := []v1.NodeAddressType{} for _, t := range config.PreferredAddressTypes { - types = append(types, api.NodeAddressType(t)) + types = append(types, v1.NodeAddressType(t)) } return &NodeConnectionInfoGetter{ diff --git a/pkg/kubelet/client/kubelet_client_test.go b/pkg/kubelet/client/kubelet_client_test.go index c2be5581299..3123c527706 100644 --- a/pkg/kubelet/client/kubelet_client_test.go +++ b/pkg/kubelet/client/kubelet_client_test.go @@ -19,13 +19,13 @@ package client import ( "testing" - 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" "k8s.io/kubernetes/pkg/client/restclient" ) // Ensure a node client can be used as a NodeGetter. // This allows anyone with a node client to easily construct a NewNodeConnectionInfoGetter. -var _ = NodeGetter(internalversion.NodeInterface(nil)) +var _ = NodeGetter(v1core.NodeInterface(nil)) func TestMakeTransportInvalid(t *testing.T) { config := &KubeletClientConfig{ diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 1c232c036f2..3e743502658 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -16,20 +16,18 @@ limitations under the License. package cm -import ( - "k8s.io/kubernetes/pkg/api" -) +import "k8s.io/kubernetes/pkg/api/v1" // Manages the containers running on a machine. type ContainerManager interface { // Runs the container manager's housekeeping. // - Ensures that the Docker daemon is in a container. // - Creates the system container where all non-containerized processes run. - Start(*api.Node) error + Start(*v1.Node) error // Returns resources allocated to system cgroups in the machine. // These cgroups include the system and Kubernetes services. - SystemCgroupsLimit() api.ResourceList + SystemCgroupsLimit() v1.ResourceList // Returns a NodeConfig that is being used by the container manager. GetNodeConfig() NodeConfig diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index f7546c6e0e6..b12e0e4772d 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -34,8 +34,8 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" "k8s.io/kubernetes/pkg/kubelet/qos" @@ -104,7 +104,7 @@ type containerManagerImpl struct { periodicTasks []func() // holds all the mounted cgroup subsystems subsystems *CgroupSubsystems - nodeInfo *api.Node + nodeInfo *v1.Node } type features struct { @@ -392,7 +392,7 @@ func (cm *containerManagerImpl) setupNode() error { }) } else if cm.RuntimeCgroupsName != "" { cont := newSystemCgroups(cm.RuntimeCgroupsName) - var capacity = api.ResourceList{} + var capacity = v1.ResourceList{} if info, err := cm.cadvisorInterface.MachineInfo(); err == nil { capacity = cadvisor.CapacityFromMachineInfo(info) } @@ -523,7 +523,7 @@ func (cm *containerManagerImpl) Status() Status { return cm.status } -func (cm *containerManagerImpl) Start(node *api.Node) error { +func (cm *containerManagerImpl) Start(node *v1.Node) error { // cache the node Info including resource capacity and // allocatable of the node cm.nodeInfo = node @@ -566,7 +566,7 @@ func (cm *containerManagerImpl) Start(node *api.Node) error { return nil } -func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList { +func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList { cpuLimit := int64(0) // Sum up resources of all external containers. 
@@ -574,8 +574,8 @@ func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList { cpuLimit += cont.cpuMillicores } - return api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity( + return v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity( cpuLimit, resource.DecimalSI), } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 186d773dbe9..cce42afcd91 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -18,20 +18,20 @@ package cm import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) type containerManagerStub struct{} var _ ContainerManager = &containerManagerStub{} -func (cm *containerManagerStub) Start(_ *api.Node) error { +func (cm *containerManagerStub) Start(_ *v1.Node) error { glog.V(2).Infof("Starting stub container manager") return nil } -func (cm *containerManagerStub) SystemCgroupsLimit() api.ResourceList { - return api.ResourceList{} +func (cm *containerManagerStub) SystemCgroupsLimit() v1.ResourceList { + return v1.ResourceList{} } func (cm *containerManagerStub) GetNodeConfig() NodeConfig { diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go index d57bca69003..5199f6d483f 100644 --- a/pkg/kubelet/cm/container_manager_unsupported.go +++ b/pkg/kubelet/cm/container_manager_unsupported.go @@ -21,7 +21,7 @@ package cm import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/util/mount" ) @@ -31,12 +31,12 @@ type unsupportedContainerManager struct { var _ ContainerManager = &unsupportedContainerManager{} -func (unsupportedContainerManager) Start(_ *api.Node) error { +func (unsupportedContainerManager) Start(_ *v1.Node) error { return fmt.Errorf("Container Manager is unsupported in this build") } -func (unsupportedContainerManager) SystemCgroupsLimit() api.ResourceList { - return api.ResourceList{} +func (unsupportedContainerManager) SystemCgroupsLimit() v1.ResourceList { + return v1.ResourceList{} } func (unsupportedContainerManager) GetNodeConfig() NodeConfig { diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index 573df34624e..0578085ca4c 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -21,7 +21,7 @@ package cm import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/util/mount" ) @@ -32,7 +32,7 @@ type containerManagerImpl struct { var _ ContainerManager = &containerManagerImpl{} -func (cm *containerManagerImpl) Start(_ *api.Node) error { +func (cm *containerManagerImpl) Start(_ *v1.Node) error { glog.V(2).Infof("Starting Windows stub container manager") return nil } diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go index cee55737eec..47d5a2e63b3 100644 --- a/pkg/kubelet/cm/helpers_linux.go +++ b/pkg/kubelet/cm/helpers_linux.go @@ -25,7 +25,7 @@ import ( libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/qos" ) @@ -83,7 +83,7 @@ func MilliCPUToShares(milliCPU int64) int64 { } // ResourceConfigForPod takes the input pod and outputs the cgroup resource config. 
-func ResourceConfigForPod(pod *api.Pod) *ResourceConfig { +func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig { // sum requests and limits, track if limits were applied for each resource. cpuRequests := int64(0) cpuLimits := int64(0) diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go index 511cd79d05f..4f8038b4492 100644 --- a/pkg/kubelet/cm/helpers_linux_test.go +++ b/pkg/kubelet/cm/helpers_linux_test.go @@ -22,26 +22,26 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" ) // getResourceList returns a ResourceList with the // specified cpu and memory resource values -func getResourceList(cpu, memory string) api.ResourceList { - res := api.ResourceList{} +func getResourceList(cpu, memory string) v1.ResourceList { + res := v1.ResourceList{} if cpu != "" { - res[api.ResourceCPU] = resource.MustParse(cpu) + res[v1.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { - res[api.ResourceMemory] = resource.MustParse(memory) + res[v1.ResourceMemory] = resource.MustParse(memory) } return res } // getResourceRequirements returns a ResourceRequirements object -func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { - res := api.ResourceRequirements{} +func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements { + res := v1.ResourceRequirements{} res.Requests = requests res.Limits = limits return res @@ -59,13 +59,13 @@ func TestResourceConfigForPod(t *testing.T) { memoryQuantity = resource.MustParse("100Mi") guaranteedMemory := memoryQuantity.Value() testCases := map[string]struct { - pod *api.Pod + pod *v1.Pod expected *ResourceConfig }{ "besteffort": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")), }, @@ -75,9 +75,9 @@ func TestResourceConfigForPod(t *testing.T) { expected: &ResourceConfig{CpuShares: &minShares}, }, "burstable-no-limits": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")), }, @@ -87,9 +87,9 @@ func TestResourceConfigForPod(t *testing.T) { expected: &ResourceConfig{CpuShares: &burstableShares}, }, "burstable-with-limits": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), }, @@ -99,9 +99,9 @@ func TestResourceConfigForPod(t *testing.T) { expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory}, }, "burstable-partial-limits": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), }, @@ -114,9 +114,9 @@ func TestResourceConfigForPod(t *testing.T) { expected: &ResourceConfig{CpuShares: &burstablePartialShares}, }, "guaranteed": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Resources: 
getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }, diff --git a/pkg/kubelet/cm/helpers_unsupported.go b/pkg/kubelet/cm/helpers_unsupported.go index f60149cb854..ca82984a742 100644 --- a/pkg/kubelet/cm/helpers_unsupported.go +++ b/pkg/kubelet/cm/helpers_unsupported.go @@ -18,7 +18,7 @@ limitations under the License. package cm -import "k8s.io/kubernetes/pkg/api" +import "k8s.io/kubernetes/pkg/api/v1" const ( MinShares = 0 @@ -40,7 +40,7 @@ func MilliCPUToShares(milliCPU int64) int64 { } // ResourceConfigForPod takes the input pod and outputs the cgroup resource config. -func ResourceConfigForPod(pod *api.Pod) *ResourceConfig { +func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig { return nil } diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go index e6c54579c1e..eb7e2789669 100644 --- a/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/pkg/kubelet/cm/pod_container_manager_linux.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/types" utilerrors "k8s.io/kubernetes/pkg/util/errors" @@ -39,7 +39,7 @@ const ( // management if qos Cgroup is enabled. type podContainerManagerImpl struct { // nodeInfo stores information about the node resource capacity - nodeInfo *api.Node + nodeInfo *v1.Node // qosContainersInfo hold absolute paths of the top level qos containers qosContainersInfo QOSContainersInfo // Stores the mounted cgroup subsystems @@ -54,14 +54,14 @@ var _ PodContainerManager = &podContainerManagerImpl{} // applyLimits sets pod cgroup resource limits // It also updates the resource limits on top level qos containers. -func (m *podContainerManagerImpl) applyLimits(pod *api.Pod) error { +func (m *podContainerManagerImpl) applyLimits(pod *v1.Pod) error { // This function will house the logic for setting the resource parameters // on the pod container config and updating top level qos container configs return nil } // Exists checks if the pod's cgroup already exists -func (m *podContainerManagerImpl) Exists(pod *api.Pod) bool { +func (m *podContainerManagerImpl) Exists(pod *v1.Pod) bool { podContainerName, _ := m.GetPodContainerName(pod) return m.cgroupManager.Exists(podContainerName) } @@ -69,7 +69,7 @@ func (m *podContainerManagerImpl) Exists(pod *api.Pod) bool { // EnsureExists takes a pod as argument and makes sure that // pod cgroup exists if qos cgroup hierarchy flag is enabled. // If the pod level container doesen't already exist it is created. -func (m *podContainerManagerImpl) EnsureExists(pod *api.Pod) error { +func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error { podContainerName, _ := m.GetPodContainerName(pod) // check if container already exist alreadyExists := m.Exists(pod) @@ -94,7 +94,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *api.Pod) error { } // GetPodContainerName returns the CgroupName identifer, and its literal cgroupfs form on the host. 
-func (m *podContainerManagerImpl) GetPodContainerName(pod *api.Pod) (CgroupName, string) { +func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) { podQOS := qos.GetPodQOS(pod) // Get the parent QOS container name var parentContainer string @@ -233,19 +233,19 @@ type podContainerManagerNoop struct { // Make sure that podContainerManagerStub implements the PodContainerManager interface var _ PodContainerManager = &podContainerManagerNoop{} -func (m *podContainerManagerNoop) Exists(_ *api.Pod) bool { +func (m *podContainerManagerNoop) Exists(_ *v1.Pod) bool { return true } -func (m *podContainerManagerNoop) EnsureExists(_ *api.Pod) error { +func (m *podContainerManagerNoop) EnsureExists(_ *v1.Pod) error { return nil } -func (m *podContainerManagerNoop) GetPodContainerName(_ *api.Pod) (CgroupName, string) { +func (m *podContainerManagerNoop) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { return m.cgroupRoot, string(m.cgroupRoot) } -func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *api.Pod) string { +func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *v1.Pod) string { return "" } diff --git a/pkg/kubelet/cm/pod_container_manager_stub.go b/pkg/kubelet/cm/pod_container_manager_stub.go index dec1c7b1bad..893ecbd1159 100644 --- a/pkg/kubelet/cm/pod_container_manager_stub.go +++ b/pkg/kubelet/cm/pod_container_manager_stub.go @@ -17,7 +17,7 @@ limitations under the License. package cm import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" ) @@ -26,15 +26,15 @@ type podContainerManagerStub struct { var _ PodContainerManager = &podContainerManagerStub{} -func (m *podContainerManagerStub) Exists(_ *api.Pod) bool { +func (m *podContainerManagerStub) Exists(_ *v1.Pod) bool { return true } -func (m *podContainerManagerStub) EnsureExists(_ *api.Pod) error { +func (m *podContainerManagerStub) EnsureExists(_ *v1.Pod) error { return nil } -func (m *podContainerManagerStub) GetPodContainerName(_ *api.Pod) (CgroupName, string) { +func (m *podContainerManagerStub) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { return "", "" } diff --git a/pkg/kubelet/cm/pod_container_manager_unsupported.go b/pkg/kubelet/cm/pod_container_manager_unsupported.go index 164278ccb2f..c97d793f398 100644 --- a/pkg/kubelet/cm/pod_container_manager_unsupported.go +++ b/pkg/kubelet/cm/pod_container_manager_unsupported.go @@ -19,7 +19,7 @@ limitations under the License. package cm import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" ) @@ -28,15 +28,15 @@ type unsupportedPodContainerManager struct { var _ PodContainerManager = &unsupportedPodContainerManager{} -func (m *unsupportedPodContainerManager) Exists(_ *api.Pod) bool { +func (m *unsupportedPodContainerManager) Exists(_ *v1.Pod) bool { return true } -func (m *unsupportedPodContainerManager) EnsureExists(_ *api.Pod) error { +func (m *unsupportedPodContainerManager) EnsureExists(_ *v1.Pod) error { return nil } -func (m *unsupportedPodContainerManager) GetPodContainerName(_ *api.Pod) (CgroupName, string) { +func (m *unsupportedPodContainerManager) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { return "", "" } diff --git a/pkg/kubelet/cm/types.go b/pkg/kubelet/cm/types.go index 48f940f72e6..d7dbaed2957 100644 --- a/pkg/kubelet/cm/types.go +++ b/pkg/kubelet/cm/types.go @@ -17,7 +17,7 @@ limitations under the License. 
package cm import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" ) @@ -86,15 +86,15 @@ type QOSContainersInfo struct { // containers for the pod. type PodContainerManager interface { // GetPodContainerName returns the CgroupName identifer, and its literal cgroupfs form on the host. - GetPodContainerName(*api.Pod) (CgroupName, string) + GetPodContainerName(*v1.Pod) (CgroupName, string) // EnsureExists takes a pod as argument and makes sure that // pod cgroup exists if qos cgroup hierarchy flag is enabled. // If the pod cgroup doesen't already exist this method creates it. - EnsureExists(*api.Pod) error + EnsureExists(*v1.Pod) error // Exists returns true if the pod cgroup exists. - Exists(*api.Pod) bool + Exists(*v1.Pod) bool // Destroy takes a pod Cgroup name as argument and destroys the pod's container. Destroy(name CgroupName) error diff --git a/pkg/kubelet/config/apiserver.go b/pkg/kubelet/config/apiserver.go index 4061d9c7330..ae59ecb434c 100644 --- a/pkg/kubelet/config/apiserver.go +++ b/pkg/kubelet/config/apiserver.go @@ -18,9 +18,12 @@ limitations under the License. package config import ( + "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/fields" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/types" @@ -28,18 +31,22 @@ import ( // NewSourceApiserver creates a config source that watches and pulls from the apiserver. func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) { - lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) + lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", v1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) newSourceApiserverFromLW(lw, updates) } // newSourceApiserverFromLW holds creates a config source that watches and pulls from the apiserver. 
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) { send := func(objs []interface{}) { - var pods []*api.Pod + var pods []*v1.Pod for _, o := range objs { - pods = append(pods, o.(*api.Pod)) + pod := o.(*v1.Pod) + if err := podutil.SetInitContainersAndStatuses(pod); err != nil { + glog.Error(err) + } + pods = append(pods, pod) } updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource} } - cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run() + cache.NewReflector(lw, &v1.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run() } diff --git a/pkg/kubelet/config/apiserver_test.go b/pkg/kubelet/config/apiserver_test.go index f05ebd5c3a0..e1b30f65591 100644 --- a/pkg/kubelet/config/apiserver_test.go +++ b/pkg/kubelet/config/apiserver_test.go @@ -19,7 +19,7 @@ package config import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/runtime" @@ -31,31 +31,31 @@ type fakePodLW struct { watchResp watch.Interface } -func (lw fakePodLW) List(options api.ListOptions) (runtime.Object, error) { +func (lw fakePodLW) List(options v1.ListOptions) (runtime.Object, error) { return lw.listResp, nil } -func (lw fakePodLW) Watch(options api.ListOptions) (watch.Interface, error) { +func (lw fakePodLW) Watch(options v1.ListOptions) (watch.Interface, error) { return lw.watchResp, nil } var _ cache.ListerWatcher = fakePodLW{} func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { - pod1v1 := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "p"}, - Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}} - pod1v2 := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "p"}, - Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}} - pod2 := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "q"}, - Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}} + pod1v1 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "p"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}} + pod1v2 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "p"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/two"}}}} + pod2 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "q"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}} // Setup fake api client. 
fakeWatch := watch.NewFake() lw := fakePodLW{ - listResp: &api.PodList{Items: []api.Pod{*pod1v1}}, + listResp: &v1.PodList{Items: []v1.Pod{*pod1v1}}, watchResp: fakeWatch, } @@ -69,7 +69,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } update := got.(kubetypes.PodUpdate) expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1) - if !api.Semantic.DeepEqual(expected, update) { + if !v1.Semantic.DeepEqual(expected, update) { t.Errorf("Expected %#v; Got %#v", expected, update) } @@ -84,7 +84,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { expectedA := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1, pod2) expectedB := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v1) - if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) { + if !v1.Semantic.DeepEqual(expectedA, update) && !v1.Semantic.DeepEqual(expectedB, update) { t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update) } @@ -98,7 +98,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { expectedA = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v2, pod2) expectedB = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v2) - if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) { + if !v1.Semantic.DeepEqual(expectedA, update) && !v1.Semantic.DeepEqual(expectedB, update) { t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update) } @@ -110,7 +110,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } update = got.(kubetypes.PodUpdate) expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2) - if !api.Semantic.DeepEqual(expected, update) { + if !v1.Semantic.DeepEqual(expected, update) { t.Errorf("Expected %#v, Got %#v", expected, update) } @@ -122,23 +122,23 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } update = got.(kubetypes.PodUpdate) expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource) - if !api.Semantic.DeepEqual(expected, update) { + if !v1.Semantic.DeepEqual(expected, update) { t.Errorf("Expected %#v, Got %#v", expected, update) } } func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) { - pod1 := api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "one"}, - Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}} - pod2 := api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "two"}, - Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}} + pod1 := v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "one"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}} + pod2 := v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "two"}, + Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}} // Setup fake api client. fakeWatch := watch.NewFake() lw := fakePodLW{ - listResp: &api.PodList{Items: []api.Pod{pod1, pod2}}, + listResp: &v1.PodList{Items: []v1.Pod{pod1, pod2}}, watchResp: fakeWatch, } @@ -172,7 +172,7 @@ func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) { // Setup fake api client. 
fakeWatch := watch.NewFake() lw := fakePodLW{ - listResp: &api.PodList{Items: []api.Pod{}}, + listResp: &v1.PodList{Items: []v1.Pod{}}, watchResp: fakeWatch, } @@ -186,7 +186,7 @@ func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) { } update := got.(kubetypes.PodUpdate) expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource) - if !api.Semantic.DeepEqual(expected, update) { + if !v1.Semantic.DeepEqual(expected, update) { t.Errorf("Expected %#v; Got %#v", expected, update) } } diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 7d9861bbfd2..3c26bec1d9b 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -23,6 +23,7 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api" +"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apimachinery/registered" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -88,7 +89,7 @@ func getSelfLink(name, namespace string) string { type defaultFunc func(pod *api.Pod) error -func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *api.Pod, err error) { +func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v1.Pod, err error) { // JSON is valid YAML, so this should work for everything. json, err := utilyaml.ToJSON(data) if err != nil { @@ -112,10 +113,14 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *a err = fmt.Errorf("invalid pod: %v", errs) return true, pod, err } - return true, newPod, nil + v1Pod := &v1.Pod{} + if err := v1.Convert_api_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil { + return true, nil, err + } + return true, v1Pod, nil } -func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api.PodList, err error) { +func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1.PodList, err error) { obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) if err != nil { return false, pods, err @@ -137,5 +142,9 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api return true, pods, err } } - return true, *newPods, err + v1Pods := &v1.PodList{} + if err := v1.Convert_api_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil { + return true, pods, err + } + return true, *v1Pods, err } diff --git a/pkg/kubelet/config/common_test.go b/pkg/kubelet/config/common_test.go index f1ee4ede87d..f611cbec093 100644 --- a/pkg/kubelet/config/common_test.go +++ b/pkg/kubelet/config/common_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/securitycontext" @@ -32,27 +33,27 @@ func noDefault(*api.Pod) error { return nil } func TestDecodeSinglePod(t *testing.T) { grace := int64(30) - pod := &api.Pod{ + pod := &v1.Pod{ TypeMeta: unversioned.TypeMeta{ APIVersion: "", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "test", UID: "12345", Namespace: "mynamespace", }, - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "image", Image: "test/image", ImagePullPolicy: "IfNotPresent", TerminationMessagePath: 
"/dev/termination-log", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), }}, - SecurityContext: &api.PodSecurityContext{}, + SecurityContext: &v1.PodSecurityContext{}, }, } json, err := runtime.Encode(testapi.Default.Codec(), pod) @@ -70,7 +71,7 @@ func TestDecodeSinglePod(t *testing.T) { t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", pod, podOut, string(json)) } - for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) { + for _, gv := range registered.EnabledVersionsForGroup(v1.GroupName) { info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), "application/yaml") encoder := api.Codecs.EncoderForVersion(info.Serializer, gv) yaml, err := runtime.Encode(encoder, pod) @@ -92,31 +93,31 @@ func TestDecodeSinglePod(t *testing.T) { func TestDecodePodList(t *testing.T) { grace := int64(30) - pod := &api.Pod{ + pod := &v1.Pod{ TypeMeta: unversioned.TypeMeta{ APIVersion: "", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "test", UID: "12345", Namespace: "mynamespace", }, - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "image", Image: "test/image", ImagePullPolicy: "IfNotPresent", TerminationMessagePath: "/dev/termination-log", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), }}, - SecurityContext: &api.PodSecurityContext{}, + SecurityContext: &v1.PodSecurityContext{}, }, } - podList := &api.PodList{ - Items: []api.Pod{*pod}, + podList := &v1.PodList{ + Items: []v1.Pod{*pod}, } json, err := runtime.Encode(testapi.Default.Codec(), podList) if err != nil { @@ -133,7 +134,7 @@ func TestDecodePodList(t *testing.T) { t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", podList, &podListOut, string(json)) } - for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) { + for _, gv := range registered.EnabledVersionsForGroup(v1.GroupName) { info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), "application/yaml") encoder := api.Codecs.EncoderForVersion(info.Serializer, gv) yaml, err := runtime.Encode(encoder, podList) diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 795fec62aa0..f63c99e9e13 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -116,7 +117,7 @@ func (c *PodConfig) Sync() { type podStorage struct { podLock sync.RWMutex // map of source name to pod name to pod reference - pods map[string]map[string]*api.Pod + pods map[string]map[string]*v1.Pod mode PodConfigNotificationMode // ensures that updates are delivered in strict order @@ -137,7 +138,7 @@ type podStorage struct { // TODO: allow initialization of the current state of the store with snapshotted version. 
func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage { return &podStorage{ - pods: make(map[string]map[string]*api.Pod), + pods: make(map[string]map[string]*v1.Pod), mode: mode, updates: updates, sourcesSeen: sets.String{}, @@ -184,7 +185,7 @@ func (s *podStorage) Merge(source string, change interface{}) error { case PodConfigNotificationSnapshotAndUpdates: if len(removes.Pods) > 0 || len(adds.Pods) > 0 || firstSet { - s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source} + s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source} } if len(updates.Pods) > 0 { s.updates <- *updates @@ -195,7 +196,7 @@ func (s *podStorage) Merge(source string, change interface{}) error { case PodConfigNotificationSnapshot: if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 || len(removes.Pods) > 0 || firstSet { - s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source} + s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source} } case PodConfigNotificationUnknown: @@ -211,21 +212,21 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de s.podLock.Lock() defer s.podLock.Unlock() - addPods := []*api.Pod{} - updatePods := []*api.Pod{} - deletePods := []*api.Pod{} - removePods := []*api.Pod{} - reconcilePods := []*api.Pod{} + addPods := []*v1.Pod{} + updatePods := []*v1.Pod{} + deletePods := []*v1.Pod{} + removePods := []*v1.Pod{} + reconcilePods := []*v1.Pod{} pods := s.pods[source] if pods == nil { - pods = make(map[string]*api.Pod) + pods = make(map[string]*v1.Pod) } // updatePodFunc is the local function which updates the pod cache *oldPods* with new pods *newPods*. // After updated, new pod will be stored in the pod cache *pods*. // Notice that *pods* and *oldPods* could be the same cache. - updatePodsFunc := func(newPods []*api.Pod, oldPods, pods map[string]*api.Pod) { + updatePodsFunc := func(newPods []*v1.Pod, oldPods, pods map[string]*v1.Pod) { filtered := filterInvalidPods(newPods, source, s.recorder) for _, ref := range filtered { name := kubecontainer.GetPodFullName(ref) @@ -282,7 +283,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de s.markSourceSet(source) // Clear the old map entries by just creating a new map oldPods := pods - pods = make(map[string]*api.Pod) + pods = make(map[string]*v1.Pod) updatePodsFunc(update.Pods, oldPods, pods) for name, existing := range oldPods { if _, found := pods[name]; !found { @@ -319,11 +320,19 @@ func (s *podStorage) seenSources(sources ...string) bool { return s.sourcesSeen.HasAll(sources...) } -func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { +func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) { names := sets.String{} for i, pod := range pods { var errlist field.ErrorList - if errs := validation.ValidatePod(pod); len(errs) != 0 { +// TODO: remove the conversion when validation is performed on versioned objects. 
+internalPod := &api.Pod{} + if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil { + name := kubecontainer.GetPodFullName(pod) + glog.Warningf("Pod[%d] (%s) from %s failed to convert to v1, ignoring: %v", i+1, name, source, err) + recorder.Eventf(pod, v1.EventTypeWarning, "FailedConversion", "Error converting pod %s from %s, ignoring: %v", name, source, err) + continue + } + if errs := validation.ValidatePod(internalPod); len(errs) != 0 { errlist = append(errlist, errs...) // If validation fails, don't trust it any further - // even Name could be bad. @@ -341,7 +350,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco name := bestPodIdentString(pod) err := errlist.ToAggregate() glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err) - recorder.Eventf(pod, api.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err) + recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err) continue } filtered = append(filtered, pod) @@ -393,14 +402,14 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool { } // recordFirstSeenTime records the first seen time of this pod. -func recordFirstSeenTime(pod *api.Pod) { +func recordFirstSeenTime(pod *v1.Pod) { glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod)) pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString() } // updateAnnotations returns an Annotation map containing the api annotation map plus // locally managed annotations -func updateAnnotations(existing, ref *api.Pod) { +func updateAnnotations(existing, ref *v1.Pod) { annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations)) for k, v := range ref.Annotations { annotations[k] = v @@ -413,7 +422,7 @@ func updateAnnotations(existing, ref *api.Pod) { existing.Annotations = annotations } -func podsDifferSemantically(existing, ref *api.Pod) bool { +func podsDifferSemantically(existing, ref *v1.Pod) bool { if reflect.DeepEqual(existing.Spec, ref.Spec) && reflect.DeepEqual(existing.Labels, ref.Labels) && reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) && @@ -430,7 +439,7 @@ func podsDifferSemantically(existing, ref *api.Pod) bool { // * if ref makes no meaningful change, but changes the pod status, returns needReconcile=true // * else return all false // Now, needUpdate, needGracefulDelete and needReconcile should never be both true -func checkAndUpdatePod(existing, ref *api.Pod) (needUpdate, needReconcile, needGracefulDelete bool) { +func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) { // 1. 
this is a reconcile // TODO: it would be better to update the whole object and only preserve certain things @@ -474,27 +483,27 @@ func checkAndUpdatePod(existing, ref *api.Pod) (needUpdate, needReconcile, needG func (s *podStorage) Sync() { s.updateLock.Lock() defer s.updateLock.Unlock() - s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource} + s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource} } // Object implements config.Accessor func (s *podStorage) MergedState() interface{} { s.podLock.RLock() defer s.podLock.RUnlock() - pods := make([]*api.Pod, 0) + pods := make([]*v1.Pod, 0) for _, sourcePods := range s.pods { for _, podRef := range sourcePods { pod, err := api.Scheme.Copy(podRef) if err != nil { glog.Errorf("unable to copy pod: %v", err) } - pods = append(pods, pod.(*api.Pod)) + pods = append(pods, pod.(*v1.Pod)) } } return pods } -func bestPodIdentString(pod *api.Pod) string { +func bestPodIdentString(pod *v1.Pod) string { namespace := pod.Namespace if namespace == "" { namespace = "" @@ -506,15 +515,15 @@ func bestPodIdentString(pod *api.Pod) string { return fmt.Sprintf("%s.%s", name, namespace) } -func copyPods(sourcePods []*api.Pod) []*api.Pod { - pods := []*api.Pod{} +func copyPods(sourcePods []*v1.Pod) []*v1.Pod { + pods := []*v1.Pod{} for _, source := range sourcePods { // Use a deep copy here just in case pod, err := api.Scheme.Copy(source) if err != nil { glog.Errorf("unable to copy pod: %v", err) } - pods = append(pods, pod.(*api.Pod)) + pods = append(pods, pod.(*v1.Pod)) } return pods } diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index 4b4b06f4f49..d3ccccf019f 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -24,8 +24,8 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/conversion" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -45,7 +45,7 @@ func expectEmptyChannel(t *testing.T, ch <-chan interface{}) { } } -type sortedPods []*api.Pod +type sortedPods []*v1.Pod func (s sortedPods) Len() int { return len(s) @@ -57,17 +57,17 @@ func (s sortedPods) Less(i, j int) bool { return s[i].Namespace < s[j].Namespace } -func CreateValidPod(name, namespace string) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func CreateValidPod(name, namespace string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: types.UID(name), // for the purpose of testing, this is unique enough Name: name, Namespace: namespace, }, - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, - Containers: []api.Container{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, + Containers: []v1.Container{ { Name: "ctr", Image: "image", @@ -79,13 +79,13 @@ func CreateValidPod(name, namespace string) *api.Pod { } } -func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*api.Pod) kubetypes.PodUpdate { +func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*v1.Pod) kubetypes.PodUpdate { return kubetypes.PodUpdate{Pods: pods, Op: op, Source: source} } func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) { eventBroadcaster := record.NewBroadcaster() - config := 
NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"})) + config := NewPodConfig(mode, eventBroadcaster.NewRecorder(v1.EventSource{Component: "kubelet"})) channel := config.Channel(TestSource) ch := config.Updates() return channel, ch, config @@ -100,7 +100,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...ku // except for "Pods", which are compared separately below. expectedCopy, updateCopy := expected[i], update expectedCopy.Pods, updateCopy.Pods = nil, nil - if !api.Semantic.DeepEqual(expectedCopy, updateCopy) { + if !v1.Semantic.DeepEqual(expectedCopy, updateCopy) { t.Fatalf("Expected %#v, Got %#v", expectedCopy, updateCopy) } @@ -186,7 +186,7 @@ func TestInvalidPodFiltered(t *testing.T) { expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))) // add an invalid update - podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo"}}) channel <- podUpdate expectNoPodUpdate(t, ch) } @@ -204,7 +204,7 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) { // container updates are separated as UPDATE pod := *podUpdate.Pods[0] - pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} + pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}} channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod) expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, &pod)) } @@ -222,7 +222,7 @@ func TestNewPodAddedSnapshot(t *testing.T) { // container updates are separated as UPDATE pod := *podUpdate.Pods[0] - pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} + pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}} channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod) expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, &pod)) } @@ -240,12 +240,12 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) { // an kubetypes.ADD should be converted to kubetypes.UPDATE pod := CreateValidPod("foo", "new") - pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} + pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}} podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, pod) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) - podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}}) + podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "new"}}) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.REMOVE, TestSource, pod)) } @@ -282,7 +282,7 @@ func TestNewPodAddedUpdatedSet(t *testing.T) { // should be converted to an kubetypes.ADD, kubetypes.REMOVE, and kubetypes.UPDATE pod := CreateValidPod("foo2", "new") - pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} + pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}} podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), 
CreateValidPod("foo4", "new")) channel <- podUpdate expectPodUpdate(t, ch, @@ -294,14 +294,14 @@ func TestNewPodAddedUpdatedSet(t *testing.T) { func TestNewPodAddedSetReconciled(t *testing.T) { // Create and touch new test pods, return the new pods and touched pod. We should create new pod list // before touching to avoid data race. - newTestPods := func(touchStatus, touchSpec bool) ([]*api.Pod, *api.Pod) { - pods := []*api.Pod{ + newTestPods := func(touchStatus, touchSpec bool) ([]*v1.Pod, *v1.Pod) { + pods := []*v1.Pod{ CreateValidPod("changeable-pod-0", "new"), CreateValidPod("constant-pod-1", "new"), CreateValidPod("constant-pod-2", "new"), } if touchStatus { - pods[0].Status = api.PodStatus{Message: strconv.Itoa(rand.Int())} + pods[0].Status = v1.PodStatus{Message: strconv.Itoa(rand.Int())} } if touchSpec { pods[0].Spec.Containers[0].Name = strconv.Itoa(rand.Int()) @@ -312,7 +312,7 @@ func TestNewPodAddedSetReconciled(t *testing.T) { kubetypes.ADD, kubetypes.SET, } { - var podWithStatusChange *api.Pod + var podWithStatusChange *v1.Pod pods, _ := newTestPods(false, false) channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) @@ -373,7 +373,7 @@ func TestPodUpdateAnnotations(t *testing.T) { t.Fatalf("%v", err) } - podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new")) + podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*v1.Pod), CreateValidPod("foo3", "new")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))) @@ -405,7 +405,7 @@ func TestPodUpdateLabels(t *testing.T) { t.Fatalf("%v", err) } - podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*api.Pod)) + podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*v1.Pod)) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, pod)) diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index 262f360e636..2405b935ccf 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -28,6 +28,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" +"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/types" @@ -50,9 +51,9 @@ func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, u func new(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile { send := func(objs []interface{}) { - var pods []*api.Pod + var pods []*v1.Pod for _, o := range objs { - pods = append(pods, o.(*api.Pod)) + pods = append(pods, o.(*v1.Pod)) } updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource} } @@ -84,7 +85,7 @@ func (s *sourceFile) resetStoreFromPath() error { return err } // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} + s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} return fmt.Errorf("path does not exist, ignoring") } @@ -116,13 +117,13 @@ func (s *sourceFile) resetStoreFromPath() error { // Get as many pod configs as we can from a directory. Return an error if and only if something // prevented us from reading anything at all. 
Do not return an error if only some files // were problematic. -func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) { +func (s *sourceFile) extractFromDir(name string) ([]*v1.Pod, error) { dirents, err := filepath.Glob(filepath.Join(name, "[^.]*")) if err != nil { return nil, fmt.Errorf("glob failed: %v", err) } - pods := make([]*api.Pod, 0) + pods := make([]*v1.Pod, 0) if len(dirents) == 0 { return pods, nil } @@ -152,7 +153,7 @@ func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) { return pods, nil } -func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) { +func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) { glog.V(3).Infof("Reading config file %q", filename) defer func() { if err == nil && pod != nil { @@ -192,7 +193,7 @@ func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) filename, string(data), podErr) } -func (s *sourceFile) replaceStore(pods ...*api.Pod) (err error) { +func (s *sourceFile) replaceStore(pods ...*v1.Pod) (err error) { objs := []interface{}{} for _, pod := range pods { objs = append(objs, pod) diff --git a/pkg/kubelet/config/file_linux.go b/pkg/kubelet/config/file_linux.go index 0b936d66850..b13de49cce9 100644 --- a/pkg/kubelet/config/file_linux.go +++ b/pkg/kubelet/config/file_linux.go @@ -26,7 +26,7 @@ import ( "github.com/golang/glog" "golang.org/x/exp/inotify" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -45,7 +45,7 @@ func (s *sourceFile) watch() error { return err } // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} + s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} return fmt.Errorf("path does not exist, ignoring") } diff --git a/pkg/kubelet/config/file_linux_test.go b/pkg/kubelet/config/file_linux_test.go index 381eb120b0b..c470fbdae11 100644 --- a/pkg/kubelet/config/file_linux_test.go +++ b/pkg/kubelet/config/file_linux_test.go @@ -29,9 +29,10 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api" +"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/runtime" @@ -57,7 +58,7 @@ func TestUpdateOnNonExistentFile(t *testing.T) { case got := <-ch: update := got.(kubetypes.PodUpdate) expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource) - if !api.Semantic.DeepDerivative(expected, update) { + if !v1.Semantic.DeepDerivative(expected, update) { t.Fatalf("expected %#v, Got %#v", expected, update) } @@ -85,11 +86,16 @@ func TestReadPodsFromFileExistAlready(t *testing.T) { case got := <-ch: update := got.(kubetypes.PodUpdate) for _, pod := range update.Pods { - if errs := validation.ValidatePod(pod); len(errs) > 0 { - t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs) + // TODO: remove the conversion when validation is performed on versioned objects. 
+ internalPod := &api.Pod{} + if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil { + t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err) + } + if errs := validation.ValidatePod(internalPod); len(errs) > 0 { + t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, internalPod, errs) } } - if !api.Semantic.DeepEqual(testCase.expected, update) { + if !v1.Semantic.DeepEqual(testCase.expected, update) { t.Fatalf("%s: Expected %#v, Got %#v", testCase.desc, testCase.expected, update) } case <-time.After(wait.ForeverTestTimeout): @@ -153,7 +159,7 @@ func TestExtractFromEmptyDir(t *testing.T) { update := (<-ch).(kubetypes.PodUpdate) expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource) - if !api.Semantic.DeepEqual(expected, update) { + if !v1.Semantic.DeepEqual(expected, update) { t.Fatalf("expected %#v, Got %#v", expected, update) } } @@ -169,47 +175,47 @@ func getTestCases(hostname types.NodeName) []*testCase { return []*testCase{ { desc: "Simple pod", - pod: &api.Pod{ + pod: &v1.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", APIVersion: "", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "test", UID: "12345", Namespace: "mynamespace", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}}, - SecurityContext: &api.PodSecurityContext{}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}}, + SecurityContext: &v1.PodSecurityContext{}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }, - expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{ - ObjectMeta: api.ObjectMeta{ + expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test-" + string(hostname), UID: "12345", Namespace: "mynamespace", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"}, SelfLink: getSelfLink("test-"+string(hostname), "mynamespace"), }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: string(hostname), - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "image", Image: "test/image", TerminationMessagePath: "/dev/termination-log", ImagePullPolicy: "Always", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}}, - SecurityContext: &api.PodSecurityContext{}, + SecurityContext: &v1.PodSecurityContext{}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }), }, @@ -312,7 +318,7 @@ func watchFileChanged(watchDir bool, t *testing.T) { lock.Lock() defer lock.Unlock() - pod := testCase.pod.(*api.Pod) + pod := testCase.pod.(*v1.Pod) pod.Spec.Containers[0].Name = "image2" testCase.expected.Pods[0].Spec.Containers[0].Name = "image2" @@ -355,12 +361,17 @@ func expectUpdate(t *testing.T, ch chan interface{}, testCase *testCase) { case got := <-ch: update := got.(kubetypes.PodUpdate) for _, pod := range update.Pods { - if errs := validation.ValidatePod(pod); len(errs) > 0 { - t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs) + // TODO: remove the conversion when validation is performed on versioned objects. 
+ internalPod := &api.Pod{} + if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil { + t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err) + } + if errs := validation.ValidatePod(internalPod); len(errs) > 0 { + t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, internalPod, errs) } } - if !api.Semantic.DeepEqual(testCase.expected, update) { + if !v1.Semantic.DeepEqual(testCase.expected, update) { t.Fatalf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update) } return diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index 41ee92c6e31..57f9f414fc4 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -25,6 +25,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" +"k8s.io/kubernetes/pkg/api/v1" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/wait" @@ -101,7 +102,7 @@ func (s *sourceURL) extractFromURL() error { } if len(data) == 0 { // Emit an update with an empty PodList to allow HTTPSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} + s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} return fmt.Errorf("zero-length data received from %v", s.url) } // Short circuit if the data has not changed since the last time it was read. @@ -117,7 +118,7 @@ func (s *sourceURL) extractFromURL() error { // It parsed but could not be used. return singlePodErr } - s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} + s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} return nil } @@ -128,7 +129,7 @@ func (s *sourceURL) extractFromURL() error { // It parsed but could not be used. 
return multiPodErr } - pods := make([]*api.Pod, 0) + pods := make([]*v1.Pod, 0) for i := range podList.Items { pods = append(pods, &podList.Items[i]) } diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index c40fc236c0f..43d9594dd8b 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -23,9 +23,10 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api" +"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apimachinery/registered" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -56,49 +57,49 @@ func TestExtractFromHttpBadness(t *testing.T) { func TestExtractInvalidPods(t *testing.T) { var testCases = []struct { desc string - pod *api.Pod + pod *v1.Pod }{ { desc: "No version", - pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: ""}}, + pod: &v1.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: ""}}, }, { desc: "Invalid version", - pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: "v1betta2"}}, + pod: &v1.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: "v1betta2"}}, }, { desc: "Invalid volume name", - pod: &api.Pod{ - TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "_INVALID_"}}, + pod: &v1.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()}, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{{Name: "_INVALID_"}}, }, }, }, { desc: "Duplicate volume names", - pod: &api.Pod{ - TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "repeated"}, {Name: "repeated"}}, + pod: &v1.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()}, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{{Name: "repeated"}, {Name: "repeated"}}, }, }, }, { desc: "Unspecified container name", - pod: &api.Pod{ - TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: ""}}, + pod: &v1.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: ""}}, }, }, }, { desc: "Invalid container name", - pod: &api.Pod{ - TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "_INVALID_"}}, + pod: &v1.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "_INVALID_"}}, }, }, }, @@ -133,144 +134,144 @@ func TestExtractPodsFromHTTP(t *testing.T) { }{ { desc: "Single pod", - pods: &api.Pod{ + pods: &v1.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", APIVersion: "", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "foo", UID: "111", Namespace: "mynamespace", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: string(nodeName), - Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, - SecurityContext: &api.PodSecurityContext{}, + Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}}, + SecurityContext: 
&v1.PodSecurityContext{}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }, expected: CreatePodUpdate(kubetypes.SET, kubetypes.HTTPSource, - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "111", Name: "foo" + "-" + nodeName, Namespace: "mynamespace", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, SelfLink: getSelfLink("foo-"+nodeName, "mynamespace"), }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: nodeName, - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, - SecurityContext: &api.PodSecurityContext{}, + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, + SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &grace, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "1", Image: "foo", TerminationMessagePath: "/dev/termination-log", ImagePullPolicy: "Always", }}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }), }, { desc: "Multiple pods", - pods: &api.PodList{ + pods: &v1.PodList{ TypeMeta: unversioned.TypeMeta{ Kind: "PodList", APIVersion: "", }, - Items: []api.Pod{ + Items: []v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "foo", UID: "111", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: nodeName, - Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, - SecurityContext: &api.PodSecurityContext{}, + Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}}, + SecurityContext: &v1.PodSecurityContext{}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "bar", UID: "222", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: nodeName, - Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}}, - SecurityContext: &api.PodSecurityContext{}, + Containers: []v1.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}}, + SecurityContext: &v1.PodSecurityContext{}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }, }, }, expected: CreatePodUpdate(kubetypes.SET, kubetypes.HTTPSource, - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "111", Name: "foo" + "-" + nodeName, Namespace: "default", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault), }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: nodeName, - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, - SecurityContext: &api.PodSecurityContext{}, + SecurityContext: &v1.PodSecurityContext{}, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "1", Image: "foo", TerminationMessagePath: "/dev/termination-log", ImagePullPolicy: "Always", }}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }, - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "222", Name: "bar" + "-" + nodeName, Namespace: "default", Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"}, SelfLink: getSelfLink("bar-"+nodeName, 
kubetypes.NamespaceDefault), }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: nodeName, - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, - SecurityContext: &api.PodSecurityContext{}, + SecurityContext: &v1.PodSecurityContext{}, - Containers: []api.Container{{ + Containers: []v1.Container{{ Name: "2", Image: "bar:bartag", TerminationMessagePath: "/dev/termination-log", ImagePullPolicy: "IfNotPresent", }}, }, - Status: api.PodStatus{ - Phase: api.PodPending, + Status: v1.PodStatus{ + Phase: v1.PodPending, }, }), }, @@ -300,11 +301,16 @@ func TestExtractPodsFromHTTP(t *testing.T) { } update := (<-ch).(kubetypes.PodUpdate) - if !api.Semantic.DeepEqual(testCase.expected, update) { + if !v1.Semantic.DeepEqual(testCase.expected, update) { t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update) } for _, pod := range update.Pods { - if errs := validation.ValidatePod(pod); len(errs) != 0 { + // TODO: remove the conversion when validation is performed on versioned objects. + internalPod := &api.Pod{} + if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil { + t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err) + } + if errs := validation.ValidatePod(internalPod); len(errs) != 0 { t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errs.ToAggregate()) } } @@ -312,19 +318,19 @@ func TestExtractPodsFromHTTP(t *testing.T) { } func TestURLWithHeader(t *testing.T) { - pod := &api.Pod{ + pod := &v1.Pod{ TypeMeta: unversioned.TypeMeta{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), Kind: "Pod", }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "foo", UID: "111", Namespace: "mynamespace", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeName: "localhost", - Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, + Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}}, }, } data, err := json.Marshal(pod) diff --git a/pkg/kubelet/container/container_reference_manager.go b/pkg/kubelet/container/container_reference_manager.go index f37a7103ab2..1b6710688fd 100644 --- a/pkg/kubelet/container/container_reference_manager.go +++ b/pkg/kubelet/container/container_reference_manager.go @@ -19,7 +19,7 @@ package container import ( "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // RefManager manages the references for the containers. @@ -28,17 +28,17 @@ import ( // for the caller. type RefManager struct { sync.RWMutex - containerIDToRef map[ContainerID]*api.ObjectReference + containerIDToRef map[ContainerID]*v1.ObjectReference } // NewRefManager creates and returns a container reference manager // with empty contents. func NewRefManager() *RefManager { - return &RefManager{containerIDToRef: make(map[ContainerID]*api.ObjectReference)} + return &RefManager{containerIDToRef: make(map[ContainerID]*v1.ObjectReference)} } // SetRef stores a reference to a pod's container, associating it with the given container ID. 
-func (c *RefManager) SetRef(id ContainerID, ref *api.ObjectReference) { +func (c *RefManager) SetRef(id ContainerID, ref *v1.ObjectReference) { c.Lock() defer c.Unlock() c.containerIDToRef[id] = ref @@ -52,7 +52,7 @@ func (c *RefManager) ClearRef(id ContainerID) { } // GetRef returns the container reference of the given ID, or (nil, false) if none is stored. -func (c *RefManager) GetRef(id ContainerID) (ref *api.ObjectReference, ok bool) { +func (c *RefManager) GetRef(id ContainerID) (ref *v1.ObjectReference, ok bool) { c.RLock() defer c.RUnlock() ref, ok = c.containerIDToRef[id] diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index f60a5419d9a..f88e6c8a5cd 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -25,8 +25,8 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -39,25 +39,25 @@ import ( // HandlerRunner runs a lifecycle handler for a container. type HandlerRunner interface { - Run(containerID ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) + Run(containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.Handler) (string, error) } // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*RunContainerOptions, error) - GetClusterDNS(pod *api.Pod) (dnsServers []string, dnsSearches []string, err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*RunContainerOptions, error) + GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, err error) GetPodDir(podUID types.UID) string - GeneratePodHostNameAndDomain(pod *api.Pod) (hostname string, hostDomain string, err error) + GeneratePodHostNameAndDomain(pod *v1.Pod) (hostname string, hostDomain string, err error) // GetExtraSupplementalGroupsForPod returns a list of the extra // supplemental groups for the Pod. These extra supplemental groups come // from annotations on persistent volumes that the pod depends on. - GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 + GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 } // ShouldContainerBeRestarted checks whether a container needs to be restarted. // TODO(yifan): Think about how to refactor this. -func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *PodStatus) bool { +func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus *PodStatus) bool { // Get latest container status. status := podStatus.FindContainerStatusByName(container.Name) // If the container was never started before, we should start it. @@ -74,11 +74,11 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu return true } // Check RestartPolicy for dead container - if pod.Spec.RestartPolicy == api.RestartPolicyNever { + if pod.Spec.RestartPolicy == v1.RestartPolicyNever { glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) return false } - if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure { + if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure { // Check the exit code. 
if status.ExitCode == 0 { glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) @@ -90,7 +90,7 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu // HashContainer returns the hash of the container. It is used to compare // the running container with its desired spec. -func HashContainer(container *api.Container) uint64 { +func HashContainer(container *v1.Container) uint64 { hash := adler32.New() hashutil.DeepHashObject(hash, *container) return uint64(hash.Sum32()) @@ -107,7 +107,7 @@ func EnvVarsToMap(envs []EnvVar) map[string]string { return result } -func ExpandContainerCommandAndArgs(container *api.Container, envs []EnvVar) (command []string, args []string) { +func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) { mapping := expansion.MappingFuncFor(EnvVarsToMap(envs)) if len(container.Command) != 0 { @@ -136,11 +136,11 @@ type innerEventRecorder struct { recorder record.EventRecorder } -func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*api.ObjectReference, bool) { +func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*v1.ObjectReference, bool) { if object == nil { return nil, false } - if ref, ok := object.(*api.ObjectReference); ok { + if ref, ok := object.(*v1.ObjectReference); ok { if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) { return ref, true } @@ -168,8 +168,8 @@ func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp } // Pod must not be nil. -func IsHostNetworkPod(pod *api.Pod) bool { - return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork +func IsHostNetworkPod(pod *v1.Pod) bool { + return pod.Spec.HostNetwork } // TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon diff --git a/pkg/kubelet/container/helpers_test.go b/pkg/kubelet/container/helpers_test.go index 4856d79af22..23669158269 100644 --- a/pkg/kubelet/container/helpers_test.go +++ b/pkg/kubelet/container/helpers_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestEnvVarsToMap(t *testing.T) { @@ -53,18 +53,18 @@ func TestEnvVarsToMap(t *testing.T) { func TestExpandCommandAndArgs(t *testing.T) { cases := []struct { name string - container *api.Container + container *v1.Container envs []EnvVar expectedCommand []string expectedArgs []string }{ { name: "none", - container: &api.Container{}, + container: &v1.Container{}, }, { name: "command expanded", - container: &api.Container{ + container: &v1.Container{ Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, envs: []EnvVar{ @@ -81,7 +81,7 @@ func TestExpandCommandAndArgs(t *testing.T) { }, { name: "args expanded", - container: &api.Container{ + container: &v1.Container{ Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, envs: []EnvVar{ @@ -98,7 +98,7 @@ func TestExpandCommandAndArgs(t *testing.T) { }, { name: "both expanded", - container: &api.Container{ + container: &v1.Container{ Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"}, Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, @@ -136,14 +136,14 @@ func TestExpandCommandAndArgs(t *testing.T) { } func TestShouldContainerBeRestarted(t *testing.T) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - 
Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "no-history"}, {Name: "alive"}, {Name: "succeed"}, @@ -187,10 +187,10 @@ func TestShouldContainerBeRestarted(t *testing.T) { }, }, } - policies := []api.RestartPolicy{ - api.RestartPolicyNever, - api.RestartPolicyOnFailure, - api.RestartPolicyAlways, + policies := []v1.RestartPolicy{ + v1.RestartPolicyNever, + v1.RestartPolicyOnFailure, + v1.RestartPolicyAlways, } expected := map[string][]bool{ "no-history": {true, true, true}, diff --git a/pkg/kubelet/container/ref.go b/pkg/kubelet/container/ref.go index 8fcdc95ad21..04d896dcb53 100644 --- a/pkg/kubelet/container/ref.go +++ b/pkg/kubelet/container/ref.go @@ -19,25 +19,25 @@ package container import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) var ImplicitContainerPrefix string = "implicitly required container " -// GenerateContainerRef returns an *api.ObjectReference which references the given container +// GenerateContainerRef returns an *v1.ObjectReference which references the given container // within the given pod. Returns an error if the reference can't be constructed or the // container doesn't actually belong to the pod. // // This function will return an error if the provided Pod does not have a selfLink, // but we expect selfLink to be populated at all call sites for the function. -func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference, error) { +func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectReference, error) { fieldPath, err := fieldPath(pod, container) if err != nil { // TODO: figure out intelligent way to refer to containers that we implicitly // start (like the pod infra container). This is not a good way, ugh. fieldPath = ImplicitContainerPrefix + container.Name } - ref, err := api.GetPartialReference(pod, fieldPath) + ref, err := v1.GetPartialReference(pod, fieldPath) if err != nil { return nil, err } @@ -46,7 +46,7 @@ func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectRe // fieldPath returns a fieldPath locating container within pod. // Returns an error if the container isn't part of the pod. 
-func fieldPath(pod *api.Pod, container *api.Container) (string, error) { +func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) { for i := range pod.Spec.Containers { here := &pod.Spec.Containers[i] if here.Name == container.Name { diff --git a/pkg/kubelet/container/ref_test.go b/pkg/kubelet/container/ref_test.go index be1cd31bd0d..acb4fb6c855 100644 --- a/pkg/kubelet/container/ref_test.go +++ b/pkg/kubelet/container/ref_test.go @@ -19,29 +19,29 @@ package container import ( "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" ) func TestFieldPath(t *testing.T) { - pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{ + pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{ {Name: "foo"}, {Name: "bar"}, {Name: ""}, {Name: "baz"}, }}} table := map[string]struct { - pod *api.Pod - container *api.Container + pod *v1.Pod + container *v1.Container path string success bool }{ - "basic": {pod, &api.Container{Name: "foo"}, "spec.containers{foo}", true}, - "basic2": {pod, &api.Container{Name: "baz"}, "spec.containers{baz}", true}, - "emptyName": {pod, &api.Container{Name: ""}, "spec.containers[2]", true}, + "basic": {pod, &v1.Container{Name: "foo"}, "spec.containers{foo}", true}, + "basic2": {pod, &v1.Container{Name: "baz"}, "spec.containers{baz}", true}, + "emptyName": {pod, &v1.Container{Name: ""}, "spec.containers[2]", true}, "basicSamePointer": {pod, &pod.Spec.Containers[0], "spec.containers{foo}", true}, - "missing": {pod, &api.Container{Name: "qux"}, "", false}, + "missing": {pod, &v1.Container{Name: "qux"}, "", false}, } for name, item := range table { @@ -64,20 +64,20 @@ func TestFieldPath(t *testing.T) { func TestGenerateContainerRef(t *testing.T) { var ( - okPod = api.Pod{ + okPod = v1.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "ok", Namespace: "test-ns", UID: "bar", ResourceVersion: "42", - SelfLink: "/api/" + registered.GroupOrDie(api.GroupName).GroupVersion.String() + "/pods/foo", + SelfLink: "/api/" + registered.GroupOrDie(v1.GroupName).GroupVersion.String() + "/pods/foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "by-name", }, @@ -91,24 +91,24 @@ func TestGenerateContainerRef(t *testing.T) { noSelfLinkPod.Kind = "" noSelfLinkPod.APIVersion = "" noSelfLinkPod.ObjectMeta.SelfLink = "" - defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + registered.GroupOrDie(api.GroupName).GroupVersion.String() + "/pods/ok" + defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + registered.GroupOrDie(v1.GroupName).GroupVersion.String() + "/pods/ok" cases := []struct { name string - pod *api.Pod - container *api.Container - expected *api.ObjectReference + pod *v1.Pod + container *v1.Container + expected *v1.ObjectReference success bool }{ { name: "by-name", pod: &okPod, - container: &api.Container{ + container: &v1.Container{ Name: "by-name", }, - expected: &api.ObjectReference{ + expected: &v1.ObjectReference{ Kind: "Pod", - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -120,10 +120,10 @@ func TestGenerateContainerRef(t *testing.T) { 
{ name: "no-name", pod: &okPod, - container: &api.Container{}, - expected: &api.ObjectReference{ + container: &v1.Container{}, + expected: &v1.ObjectReference{ Kind: "Pod", - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -135,19 +135,19 @@ func TestGenerateContainerRef(t *testing.T) { { name: "no-selflink", pod: &noSelfLinkPod, - container: &api.Container{}, + container: &v1.Container{}, expected: nil, success: false, }, { name: "defaulted-selflink", pod: &defaultedSelfLinkPod, - container: &api.Container{ + container: &v1.Container{ Name: "by-name", }, - expected: &api.ObjectReference{ + expected: &v1.ObjectReference{ Kind: "Pod", - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -159,12 +159,12 @@ func TestGenerateContainerRef(t *testing.T) { { name: "implicitly-required", pod: &okPod, - container: &api.Container{ + container: &v1.Container{ Name: "net", }, - expected: &api.ObjectReference{ + expected: &v1.ObjectReference{ Kind: "Pod", - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), Name: "ok", Namespace: "test-ns", UID: "bar", diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 94001216a17..7242d1d68b2 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -25,7 +25,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/flowcontrol" @@ -85,13 +85,13 @@ type Runtime interface { // TODO: Revisit this method and make it cleaner. GarbageCollect(gcPolicy ContainerGCPolicy, allSourcesReady bool) error // Syncs the running pod into the desired pod. - SyncPod(pod *api.Pod, apiPodStatus api.PodStatus, podStatus *PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult + SyncPod(pod *v1.Pod, apiPodStatus v1.PodStatus, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult // KillPod kills all the containers of a pod. Pod may be nil, running pod must not be. // TODO(random-liu): Return PodSyncResult in KillPod. // gracePeriodOverride if specified allows the caller to override the pod default grace period. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. - KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error + KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error // GetPodStatus retrieves the status of the pod, including the // information of all containers in the pod that are visble in Runtime. GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) @@ -111,7 +111,7 @@ type Runtime interface { // default, it returns a snapshot of the container log. Set 'follow' to true to // stream the log. Set 'follow' to false and specify the number of lines (e.g. // "100" or "all") to tail the log. 
- GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) + GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) // Delete a container. If the container is still running, an error is returned. DeleteContainer(containerID ContainerID) error // ImageService provides methods to image-related methods. @@ -147,7 +147,7 @@ type IndirectStreamingRuntime interface { type ImageService interface { // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. - PullImage(image ImageSpec, pullSecrets []api.Secret) error + PullImage(image ImageSpec, pullSecrets []v1.Secret) error // IsImagePresent checks whether the container image is already in the local storage. IsImagePresent(image ImageSpec) (bool, error) // Gets all images currently on the machine. @@ -188,8 +188,8 @@ type Pod struct { // PodPair contains both runtime#Pod and api#Pod type PodPair struct { - // APIPod is the api.Pod - APIPod *api.Pod + // APIPod is the v1.Pod + APIPod *v1.Pod // RunningPod is the pod defined defined in pkg/kubelet/container/runtime#Pod RunningPod *Pod } @@ -270,7 +270,7 @@ type Container struct { // a container. ID ContainerID // The name of the container, which should be the same as specified by - // api.Container. + // v1.Container. Name string // The image name of the container, this also includes the tag of the image, // the expected form is "NAME:TAG". @@ -285,7 +285,7 @@ type Container struct { } // PodStatus represents the status of the pod and its containers. -// api.PodStatus can be derived from examining PodStatus and api.Pod. +// v1.PodStatus can be derived from examining PodStatus and v1.Pod. type PodStatus struct { // ID of the pod. ID types.UID @@ -392,7 +392,7 @@ type PortMapping struct { // Name of the port mapping Name string // Protocol of the port mapping. - Protocol api.Protocol + Protocol v1.Protocol // The port number within the container. ContainerPort int // The port number on the host. @@ -570,16 +570,16 @@ func (p *Pod) FindSandboxByID(id ContainerID) *Container { return nil } -// ToAPIPod converts Pod to api.Pod. Note that if a field in api.Pod has no +// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no // corresponding field in Pod, the field would not be populated. -func (p *Pod) ToAPIPod() *api.Pod { - var pod api.Pod +func (p *Pod) ToAPIPod() *v1.Pod { + var pod v1.Pod pod.UID = p.ID pod.Name = p.Name pod.Namespace = p.Namespace for _, c := range p.Containers { - var container api.Container + var container v1.Container container.Name = c.Name container.Image = c.Image pod.Spec.Containers = append(pod.Spec.Containers, container) @@ -593,7 +593,7 @@ func (p *Pod) IsEmpty() bool { } // GetPodFullName returns a name that uniquely identifies a pod. -func GetPodFullName(pod *api.Pod) string { +func GetPodFullName(pod *v1.Pod) string { // Use underscore as the delimiter because it is not allowed in pod name // (DNS subdomain format), while allowed in the container name format. return pod.Name + "_" + pod.Namespace diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go index 6ff792a551c..3b996ea77ca 100644 --- a/pkg/kubelet/container/testing/fake_runtime.go +++ b/pkg/kubelet/container/testing/fake_runtime.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" . 
"k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/flowcontrol" @@ -44,7 +44,7 @@ type FakeRuntime struct { PodList []*FakePod AllPodList []*FakePod ImageList []Image - APIPodStatus api.PodStatus + APIPodStatus v1.PodStatus PodStatus PodStatus StartedPods []string KilledPods []string @@ -133,7 +133,7 @@ func (f *FakeRuntime) ClearCalls() { f.CalledFunctions = []string{} f.PodList = []*FakePod{} f.AllPodList = []*FakePod{} - f.APIPodStatus = api.PodStatus{} + f.APIPodStatus = v1.PodStatus{} f.StartedPods = []string{} f.KilledPods = []string{} f.StartedContainers = []string{} @@ -236,7 +236,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) { return pods, f.Err } -func (f *FakeRuntime) SyncPod(pod *api.Pod, _ api.PodStatus, _ *PodStatus, _ []api.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) { +func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ v1.PodStatus, _ *PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) { f.Lock() defer f.Unlock() @@ -252,7 +252,7 @@ func (f *FakeRuntime) SyncPod(pod *api.Pod, _ api.PodStatus, _ *PodStatus, _ []a return } -func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error { +func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error { f.Lock() defer f.Unlock() @@ -264,7 +264,7 @@ func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride return f.Err } -func (f *FakeRuntime) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error { +func (f *FakeRuntime) RunContainerInPod(container v1.Container, pod *v1.Pod, volumeMap map[string]volume.VolumePlugin) error { f.Lock() defer f.Unlock() @@ -281,14 +281,14 @@ func (f *FakeRuntime) RunContainerInPod(container api.Container, pod *api.Pod, v return f.Err } -func (f *FakeRuntime) KillContainerInPod(container api.Container, pod *api.Pod) error { +func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) error { f.Lock() defer f.Unlock() f.CalledFunctions = append(f.CalledFunctions, "KillContainerInPod") f.KilledContainers = append(f.KilledContainers, container.Name) - var containers []api.Container + var containers []v1.Container for _, c := range pod.Spec.Containers { if c.Name == container.Name { continue @@ -336,7 +336,7 @@ func (f *FakeDirectStreamingRuntime) AttachContainer(containerID ContainerID, st return f.Err } -func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) { +func (f *FakeRuntime) GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { f.Lock() defer f.Unlock() @@ -344,7 +344,7 @@ func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, lo return f.Err } -func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []api.Secret) error { +func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret) error { f.Lock() defer f.Unlock() diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go index b3edc2c6c06..6fb4acea64c 100644 --- a/pkg/kubelet/container/testing/runtime_mock.go +++ b/pkg/kubelet/container/testing/runtime_mock.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/mock" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" . 
"k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/flowcontrol" @@ -65,22 +65,22 @@ func (r *Mock) GetPods(all bool) ([]*Pod, error) { return args.Get(0).([]*Pod), args.Error(1) } -func (r *Mock) SyncPod(pod *api.Pod, apiStatus api.PodStatus, status *PodStatus, secrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult { +func (r *Mock) SyncPod(pod *v1.Pod, apiStatus v1.PodStatus, status *PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult { args := r.Called(pod, apiStatus, status, secrets, backOff) return args.Get(0).(PodSyncResult) } -func (r *Mock) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error { +func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error { args := r.Called(pod, runningPod, gracePeriodOverride) return args.Error(0) } -func (r *Mock) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error { +func (r *Mock) RunContainerInPod(container v1.Container, pod *v1.Pod, volumeMap map[string]volume.VolumePlugin) error { args := r.Called(pod, pod, volumeMap) return args.Error(0) } -func (r *Mock) KillContainerInPod(container api.Container, pod *api.Pod) error { +func (r *Mock) KillContainerInPod(container v1.Container, pod *v1.Pod) error { args := r.Called(pod, pod) return args.Error(0) } @@ -100,12 +100,12 @@ func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, return args.Error(0) } -func (r *Mock) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) { +func (r *Mock) GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { args := r.Called(pod, containerID, logOptions, stdout, stderr) return args.Error(0) } -func (r *Mock) PullImage(image ImageSpec, pullSecrets []api.Secret) error { +func (r *Mock) PullImage(image ImageSpec, pullSecrets []v1.Secret) error { args := r.Called(image, pullSecrets) return args.Error(0) } diff --git a/pkg/kubelet/custommetrics/custom_metrics.go b/pkg/kubelet/custommetrics/custom_metrics.go index 314a3b5df00..1df5ff2a391 100644 --- a/pkg/kubelet/custommetrics/custom_metrics.go +++ b/pkg/kubelet/custommetrics/custom_metrics.go @@ -20,7 +20,7 @@ package custommetrics import ( "path" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) const ( @@ -31,7 +31,7 @@ const ( // Alpha implementation. // Returns a path to a cAdvisor-specific custom metrics configuration. -func GetCAdvisorCustomMetricsDefinitionPath(container *api.Container) (*string, error) { +func GetCAdvisorCustomMetricsDefinitionPath(container *v1.Container) (*string, error) { // Assuemes that the container has Custom Metrics enabled if it has "/etc/custom-metrics" directory // mounted as a volume. Custom Metrics definition is expected to be in "definition.json". 
if container.VolumeMounts != nil { diff --git a/pkg/kubelet/custommetrics/custom_metrics_test.go b/pkg/kubelet/custommetrics/custom_metrics_test.go index bcda7b056c9..49aa236b32a 100644 --- a/pkg/kubelet/custommetrics/custom_metrics_test.go +++ b/pkg/kubelet/custommetrics/custom_metrics_test.go @@ -20,18 +20,18 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestGetCAdvisorCustomMetricsDefinitionPath(t *testing.T) { - regularContainer := &api.Container{ + regularContainer := &v1.Container{ Name: "test_container", } - cmContainer := &api.Container{ + cmContainer := &v1.Container{ Name: "test_container", - VolumeMounts: []api.VolumeMount{ + VolumeMounts: []v1.VolumeMount{ { Name: "cm", MountPath: CustomMetricsDefinitionDir, diff --git a/pkg/kubelet/dockershim/cm/container_manager_unsupported.go b/pkg/kubelet/dockershim/cm/container_manager_unsupported.go index 5a78017d076..ad0e5b80bfc 100644 --- a/pkg/kubelet/dockershim/cm/container_manager_unsupported.go +++ b/pkg/kubelet/dockershim/cm/container_manager_unsupported.go @@ -20,6 +20,7 @@ package cm import ( "fmt" + "k8s.io/kubernetes/pkg/kubelet/dockertools" ) diff --git a/pkg/kubelet/dockershim/doc.go b/pkg/kubelet/dockershim/doc.go index 619271d3467..5bc3318d5bb 100644 --- a/pkg/kubelet/dockershim/doc.go +++ b/pkg/kubelet/dockershim/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Docker integration using pkg/kubelet/api/v1alpha1/runtime/api.pb.go. +// Docker integration using pkg/kubelet/api/v1alpha1/runtime/v1.pb.go. package dockershim diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index 88fcb4716cd..0504fdb160c 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -305,7 +305,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeApi.PodSandboxConfig, labels := makeLabels(c.GetLabels(), c.GetAnnotations()) // Apply a label to distinguish sandboxes from regular containers. labels[containerTypeLabelKey] = containerTypeLabelSandbox - // Apply a container name label for infra container. This is used in summary api. + // Apply a container name label for infra container. This is used in summary v1. // TODO(random-liu): Deprecate this label once container metrics is directly got from CRI. 
labels[types.KubernetesContainerNameLabel] = sandboxContainerName diff --git a/pkg/kubelet/dockershim/helpers.go b/pkg/kubelet/dockershim/helpers.go index ee2440dbfa5..dd649e80d06 100644 --- a/pkg/kubelet/dockershim/helpers.go +++ b/pkg/kubelet/dockershim/helpers.go @@ -28,7 +28,7 @@ import ( dockernat "github.com/docker/go-connections/nat" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/types" @@ -235,7 +235,7 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) string { func getSysctlsFromAnnotations(annotations map[string]string) (map[string]string, error) { var results map[string]string - sysctls, unsafeSysctls, err := api.SysctlsFromPodAnnotations(annotations) + sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(annotations) if err != nil { return nil, err } diff --git a/pkg/kubelet/dockershim/helpers_test.go b/pkg/kubelet/dockershim/helpers_test.go index 9fb0daebc37..6c2710b63f5 100644 --- a/pkg/kubelet/dockershim/helpers_test.go +++ b/pkg/kubelet/dockershim/helpers_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/security/apparmor" ) @@ -58,19 +58,19 @@ func TestGetContainerSecurityOpts(t *testing.T) { }, { msg: "Seccomp unconfined", config: makeConfig(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + containerName: "unconfined", + v1.SeccompContainerAnnotationKeyPrefix + containerName: "unconfined", }), expectedOpts: []string{"seccomp=unconfined"}, }, { msg: "Seccomp default", config: makeConfig(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", + v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", }), expectedOpts: nil, }, { msg: "Seccomp pod default", config: makeConfig(map[string]string{ - api.SeccompPodAnnotationKey: "docker/default", + v1.SeccompPodAnnotationKey: "docker/default", }), expectedOpts: nil, }, { @@ -88,8 +88,8 @@ func TestGetContainerSecurityOpts(t *testing.T) { }, { msg: "AppArmor and seccomp profile", config: makeConfig(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", - apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo", + v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", + apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo", }), expectedOpts: []string{"apparmor=foo"}, }} @@ -121,20 +121,20 @@ func TestGetSandboxSecurityOpts(t *testing.T) { }, { msg: "Seccomp default", config: makeConfig(map[string]string{ - api.SeccompPodAnnotationKey: "docker/default", + v1.SeccompPodAnnotationKey: "docker/default", }), expectedOpts: nil, }, { msg: "Seccomp unconfined", config: makeConfig(map[string]string{ - api.SeccompPodAnnotationKey: "unconfined", + v1.SeccompPodAnnotationKey: "unconfined", }), expectedOpts: []string{"seccomp=unconfined"}, }, { msg: "Seccomp pod and container profile", config: makeConfig(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + "test-container": "unconfined", - api.SeccompPodAnnotationKey: "docker/default", + v1.SeccompContainerAnnotationKeyPrefix + "test-container": "unconfined", + v1.SeccompPodAnnotationKey: 
"docker/default", }), expectedOpts: nil, }} @@ -156,8 +156,8 @@ func TestGetSystclsFromAnnotations(t *testing.T) { expectedSysctls map[string]string }{{ annotations: map[string]string{ - api.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000", - api.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000", + v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000", + v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000", }, expectedSysctls: map[string]string{ "kernel.shmmni": "32768", @@ -166,7 +166,7 @@ func TestGetSystclsFromAnnotations(t *testing.T) { }, }, { annotations: map[string]string{ - api.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000", + v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000", }, expectedSysctls: map[string]string{ "kernel.shmmni": "32768", @@ -174,7 +174,7 @@ func TestGetSystclsFromAnnotations(t *testing.T) { }, }, { annotations: map[string]string{ - api.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000", + v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000", }, expectedSysctls: map[string]string{ "knet.ipv4.route.min_pmtu": "1000", diff --git a/pkg/kubelet/dockershim/security_context.go b/pkg/kubelet/dockershim/security_context.go index b323db8e9f8..e1b74698d92 100644 --- a/pkg/kubelet/dockershim/security_context.go +++ b/pkg/kubelet/dockershim/security_context.go @@ -22,7 +22,7 @@ import ( dockercontainer "github.com/docker/engine-api/types/container" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/securitycontext" ) @@ -100,7 +100,7 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, sandboxID st if sc.SelinuxOptions != nil { hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions( hostConfig.SecurityOpt, - &api.SELinuxOptions{ + &v1.SELinuxOptions{ User: sc.SelinuxOptions.GetUser(), Role: sc.SelinuxOptions.GetRole(), Type: sc.SelinuxOptions.GetType(), diff --git a/pkg/kubelet/dockertools/container_gc_test.go b/pkg/kubelet/dockertools/container_gc_test.go index f654b263a21..f46dc05bfb6 100644 --- a/pkg/kubelet/dockertools/container_gc_test.go +++ b/pkg/kubelet/dockertools/container_gc_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" ) @@ -65,8 +65,8 @@ func makeUndefinedContainer(id string, running bool, created time.Time) *FakeCon func addPods(podGetter podGetter, podUIDs ...types.UID) { fakePodGetter := podGetter.(*fakePodGetter) for _, uid := range podUIDs { - fakePodGetter.pods[uid] = &api.Pod{ - ObjectMeta: api.ObjectMeta{ + fakePodGetter.pods[uid] = &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod" + string(uid), Namespace: "test", UID: uid, diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go index e38b56e390f..5a8e9b9c2b8 100644 --- a/pkg/kubelet/dockertools/docker.go +++ b/pkg/kubelet/dockertools/docker.go @@ -31,7 +31,7 @@ import ( dockerapi "github.com/docker/engine-api/client" dockertypes "github.com/docker/engine-api/types" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/credentialprovider" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/images" @@ -93,7 +93,7 @@ func 
SetContainerNamePrefix(prefix string) { // DockerPuller is an abstract interface for testability. It abstracts image pull operations. type DockerPuller interface { - Pull(image string, secrets []api.Secret) error + Pull(image string, secrets []v1.Secret) error IsImagePresent(image string) (bool, error) } @@ -225,7 +225,7 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { return false } -func (p dockerPuller) Pull(image string, secrets []api.Secret) error { +func (p dockerPuller) Pull(image string, secrets []v1.Secret) error { keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring) if err != nil { return err @@ -293,7 +293,7 @@ func (p dockerPuller) IsImagePresent(image string) (bool, error) { // Although rand.Uint32() is not really unique, but it's enough for us because error will // only occur when instances of the same container in the same pod have the same UID. The // chance is really slim. -func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string, string) { +func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) { containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16) stableName := fmt.Sprintf("%s_%s_%s_%s", containerNamePrefix, diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go index 423b18cb8ea..46a0ac283f1 100644 --- a/pkg/kubelet/dockertools/docker_manager.go +++ b/pkg/kubelet/dockertools/docker_manager.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -113,7 +114,7 @@ var ( _ kubecontainer.DirectStreamingRuntime = &DockerManager{} // TODO: make this a TTL based pull (if image older than X policy, pull) - podInfraContainerImagePullPolicy = api.PullIfNotPresent + podInfraContainerImagePullPolicy = v1.PullIfNotPresent // Default set of seccomp security options. defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}} @@ -129,7 +130,7 @@ type DockerManager struct { // The image name of the pod infra container. podInfraContainerImage string // (Optional) Additional environment variables to be set for the pod infra container. - podInfraContainerEnv []api.EnvVar + podInfraContainerEnv []v1.EnvVar // TODO(yifan): Record the pull failure so we can eliminate the image checking? // Lower level docker image puller. @@ -194,14 +195,14 @@ type DockerManager struct { // A subset of the pod.Manager interface extracted for testing purposes. type podGetter interface { - GetPodByUID(kubetypes.UID) (*api.Pod, bool) + GetPodByUID(kubetypes.UID) (*v1.Pod, bool) } func PodInfraContainerEnv(env map[string]string) kubecontainer.Option { return func(rt kubecontainer.Runtime) { dm := rt.(*DockerManager) for k, v := range env { - dm.podInfraContainerEnv = append(dm.podInfraContainerEnv, api.EnvVar{ + dm.podInfraContainerEnv = append(dm.podInfraContainerEnv, v1.EnvVar{ Name: k, Value: v, }) @@ -308,7 +309,7 @@ func NewDockerManager( // stream the log. Set 'follow' to false and specify the number of lines (e.g. // "100" or "all") to tail the log. // TODO: Make 'RawTerminal' option flagable. 
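// Illustration only (not part of the diff): a minimal sketch of handing image pull
// secrets to a DockerPuller after the api -> v1 switch. The registry name, secret
// contents, and the `puller`/`dockercfgJSON` variables are assumptions for the example;
// v1.Secret and v1.SecretTypeDockercfg come from "k8s.io/kubernetes/pkg/api/v1".
pullSecret := v1.Secret{
	ObjectMeta: v1.ObjectMeta{Name: "registry-creds", Namespace: "default"},
	Type:       v1.SecretTypeDockercfg,
	Data:       map[string][]byte{".dockercfg": dockercfgJSON},
}
if err := puller.Pull("example.com/app:v1", []v1.Secret{pullSecret}); err != nil {
	glog.Errorf("image pull failed: %v", err)
}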
-func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { +func (dm *DockerManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { container, err := dm.client.InspectContainer(containerID.ID) if err != nil { return err @@ -318,7 +319,7 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontaine // Temporarily export this function to share with dockershim. // TODO: clean this up. -func GetContainerLogs(client DockerInterface, pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer, rawTerm bool) error { +func GetContainerLogs(client DockerInterface, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer, rawTerm bool) error { var since int64 if logOptions.SinceSeconds != nil { t := unversioned.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second) @@ -584,10 +585,10 @@ func makePortsAndBindings(portMappings []kubecontainer.PortMapping) (map[dockern } func (dm *DockerManager) runContainer( - pod *api.Pod, - container *api.Container, + pod *v1.Pod, + container *v1.Container, opts *kubecontainer.RunContainerOptions, - ref *api.ObjectReference, + ref *v1.ObjectReference, netMode string, ipcMode string, utsMode string, @@ -620,7 +621,7 @@ func (dm *DockerManager) runContainer( // TODO: This is kind of hacky, we should really just encode the bits we need. // TODO: This is hacky because the Kubelet should be parameterized to encode a specific version // and needs to be able to migrate this whenever we deprecate v1. Should be a member of DockerManager. 
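// Illustration only: a sketch of the *v1.PodLogOptions that GetContainerLogs now takes;
// the sixty-second window and ten-line tail are arbitrary example values.
since, tail := int64(60), int64(10)
logOpts := &v1.PodLogOptions{
	Follow:       false,
	SinceSeconds: &since, // converted to a docker "since" timestamp as in the hunk above
	TailLines:    &tail,
	Timestamps:   true,
}
// err := GetContainerLogs(client, pod, containerID, logOpts, os.Stdout, os.Stderr, false)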
- if data, err := kruntime.Encode(api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}), pod); err == nil { + if data, err := kruntime.Encode(api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"}), pod); err == nil { labels[kubernetesPodLabel] = string(data) } else { glog.Errorf("Failed to encode pod: %s for prestop hook", pod.Name) @@ -711,9 +712,9 @@ func (dm *DockerManager) runContainer( // Set sysctls if requested if container.Name == PodInfraContainerName { - sysctls, unsafeSysctls, err := api.SysctlsFromPodAnnotations(pod.Annotations) + sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(pod.Annotations) if err != nil { - dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err) + dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err) return kubecontainer.ContainerID{}, err } if len(sysctls)+len(unsafeSysctls) > 0 { @@ -789,7 +790,7 @@ func (dm *DockerManager) runContainer( securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig, supplementalGids) createResp, err := dm.client.CreateContainer(dockerOpts) if err != nil { - dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err) + dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err) return kubecontainer.ContainerID{}, err } if len(createResp.Warnings) != 0 { @@ -808,21 +809,21 @@ func (dm *DockerManager) runContainer( } createdEventMsg = fmt.Sprintf("%s; Security:[%s]", createdEventMsg, strings.Join(msgs, " ")) } - dm.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, createdEventMsg) + dm.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, createdEventMsg) if err = dm.client.StartContainer(createResp.ID); err != nil { - dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer, + dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Failed to start container with docker id %v with error: %v", utilstrings.ShortenString(createResp.ID, 12), err) return kubecontainer.ContainerID{}, err } - dm.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12)) + dm.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12)) return kubecontainer.DockerID(createResp.ID).ContainerID(), nil } // setInfraContainerNetworkConfig sets the network configuration for the infra-container. We only set network configuration for infra-container, all // the user containers will share the same network namespace with infra-container. 
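// Illustration only: a minimal sketch of the v1 sysctl-annotation helper called above.
// The annotation values are made up; each returned entry is assumed to be a v1.Sysctl
// with Name/Value fields, and the unsafe list comes from the companion unsafe key.
annotations := map[string]string{
	v1.SysctlsPodAnnotationKey:       "kernel.shmmax=1000000000",
	v1.UnsafeSysctlsPodAnnotationKey: "net.ipv4.route.min_pmtu=1000",
}
sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(annotations)
if err != nil {
	glog.Errorf("invalid sysctl annotations: %v", err)
}
for _, s := range append(sysctls, unsafeSysctls...) {
	glog.V(4).Infof("pod requests sysctl %s=%s", s.Name, s.Value)
}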
-func setInfraContainerNetworkConfig(pod *api.Pod, netMode string, opts *kubecontainer.RunContainerOptions, dockerOpts *dockertypes.ContainerCreateConfig) { +func setInfraContainerNetworkConfig(pod *v1.Pod, netMode string, opts *kubecontainer.RunContainerOptions, dockerOpts *dockertypes.ContainerCreateConfig) { exposedPorts, portBindings := makePortsAndBindings(opts.PortMappings) dockerOpts.Config.ExposedPorts = exposedPorts dockerOpts.HostConfig.PortBindings = dockernat.PortMap(portBindings) @@ -838,7 +839,7 @@ func setInfraContainerNetworkConfig(pod *api.Pod, netMode string, opts *kubecont } } -func setEntrypointAndCommand(container *api.Container, opts *kubecontainer.RunContainerOptions, dockerOpts dockertypes.ContainerCreateConfig) { +func setEntrypointAndCommand(container *v1.Container, opts *kubecontainer.RunContainerOptions, dockerOpts dockertypes.ContainerCreateConfig) { command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs) dockerOpts.Config.Entrypoint = dockerstrslice.StrSlice(command) @@ -957,7 +958,7 @@ func (dm *DockerManager) ListImages() ([]kubecontainer.Image, error) { } // PullImage pulls an image from network to local storage. -func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error { +func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) error { return dm.dockerPuller.Pull(image.Image, secrets) } @@ -983,8 +984,8 @@ func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error { } // podInfraContainerChanged returns true if the pod infra container has changed. -func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) { - var ports []api.ContainerPort +func (dm *DockerManager) podInfraContainerChanged(pod *v1.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) { + var ports []v1.ContainerPort // Check network mode. if kubecontainer.IsHostNetworkPod(pod) { @@ -995,7 +996,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine networkMode := getDockerNetworkMode(dockerPodInfraContainer) if networkMode != namespaceModeHost { - glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode) + glog.V(4).Infof("host: %v, %v", pod.Spec.HostNetwork, networkMode) return true, nil } } else if dm.networkPlugin.Name() != "cni" && dm.networkPlugin.Name() != "kubenet" { @@ -1008,7 +1009,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine ports = append(ports, container.Ports...) } } - expectedPodInfraContainer := &api.Container{ + expectedPodInfraContainer := &v1.Container{ Name: PodInfraContainerName, Image: dm.podInfraContainerImage, Ports: ports, @@ -1019,7 +1020,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine } // determine if the container root should be a read only filesystem. -func readOnlyRootFilesystem(container *api.Container) bool { +func readOnlyRootFilesystem(container *v1.Container) bool { return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem } @@ -1171,7 +1172,7 @@ func (d dockerOpt) GetKV() (string, string) { } // Get the docker security options for seccomp. 
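// Illustration only: readOnlyRootFilesystem above now reads the v1 security context;
// the container name and image are placeholders.
readOnly := true
c := v1.Container{
	Name:  "app",
	Image: "example.com/app:v1",
	SecurityContext: &v1.SecurityContext{
		ReadOnlyRootFilesystem: &readOnly,
	},
}
// readOnlyRootFilesystem(&c) reports true, so the container's root filesystem is
// mounted read-only when the docker HostConfig is built.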
-func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) { +func (dm *DockerManager) getSeccompOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) { version, err := dm.APIVersion() if err != nil { return nil, err @@ -1190,10 +1191,10 @@ func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string) ([]dockerO // Temporarily export this function to share with dockershim. // TODO: clean this up. func GetSeccompOpts(annotations map[string]string, ctrName, profileRoot string) ([]dockerOpt, error) { - profile, profileOK := annotations[api.SeccompContainerAnnotationKeyPrefix+ctrName] + profile, profileOK := annotations[v1.SeccompContainerAnnotationKeyPrefix+ctrName] if !profileOK { // try the pod profile - profile, profileOK = annotations[api.SeccompPodAnnotationKey] + profile, profileOK = annotations[v1.SeccompPodAnnotationKey] if !profileOK { // return early the default return defaultSeccompOpt, nil @@ -1232,7 +1233,7 @@ func GetSeccompOpts(annotations map[string]string, ctrName, profileRoot string) } // Get the docker security options for AppArmor. -func (dm *DockerManager) getAppArmorOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) { +func (dm *DockerManager) getAppArmorOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) { return GetAppArmorOpts(pod.Annotations, ctrName) } @@ -1406,13 +1407,13 @@ func PortForward(client DockerInterface, podInfraContainerID string, port uint16 // TODO(random-liu): After using pod status for KillPod(), we can also remove the kubernetesPodLabel, because all the needed information should have // been extract from new labels and stored in pod status. // only hard eviction scenarios should provide a grace period override, all other code paths must pass nil. -func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { +func (dm *DockerManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { result := dm.killPodWithSyncResult(pod, runningPod, gracePeriodOverride) return result.Error() } // NOTE(random-liu): The pod passed in could be *nil* when kubelet restarted. -func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { +func (dm *DockerManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { // Short circuit if there's nothing to kill. if len(runningPod.Containers) == 0 { return @@ -1423,7 +1424,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont wg := sync.WaitGroup{} var ( networkContainer *kubecontainer.Container - networkSpec *api.Container + networkSpec *v1.Container ) wg.Add(len(runningPod.Containers)) for _, container := range runningPod.Containers { @@ -1431,7 +1432,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont defer utilruntime.HandleCrash() defer wg.Done() - var containerSpec *api.Container + var containerSpec *v1.Container if pod != nil { for i, c := range pod.Spec.Containers { if c.Name == container.Name { @@ -1503,7 +1504,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont // KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod, // and will attempt to lookup the other information if missing. 
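// Illustration only: the annotation lookup in GetSeccompOpts above prefers the
// per-container key and falls back to the pod-wide key; "app" is a placeholder name.
pod := &v1.Pod{}
pod.Annotations = map[string]string{
	v1.SeccompPodAnnotationKey:                     "unconfined",
	v1.SeccompContainerAnnotationKeyPrefix + "app": "docker/default",
}
// GetSeccompOpts(pod.Annotations, "app", profileRoot) resolves to "docker/default" for
// "app", while any other container in this pod falls back to "unconfined".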
-func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, message string, gracePeriodOverride *int64) error { +func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *v1.Container, pod *v1.Pod, message string, gracePeriodOverride *int64) error { switch { case containerID.IsEmpty(): // Locate the container. @@ -1542,7 +1543,7 @@ func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerI // KillContainerInPod if information must be retrieved first. It is only valid to provide a grace period override // during hard eviction scenarios. All other code paths in kubelet must never provide a grace period override otherwise // data corruption could occur in the end-user application. -func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, reason string, gracePeriodOverride *int64) error { +func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *v1.Container, pod *v1.Pod, reason string, gracePeriodOverride *int64) error { ID := containerID.ID name := ID if container != nil { @@ -1614,7 +1615,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co if reason != "" { message = fmt.Sprint(message, ": ", reason) } - dm.recorder.Event(ref, api.EventTypeNormal, events.KillingContainer, message) + dm.recorder.Event(ref, v1.EventTypeNormal, events.KillingContainer, message) dm.containerRefManager.ClearRef(containerID) } return err @@ -1626,13 +1627,13 @@ func (dm *DockerManager) generateFailedContainerEvent(containerID kubecontainer. glog.Warningf("No ref for pod '%q'", podName) return } - dm.recorder.Event(ref, api.EventTypeWarning, reason, message) + dm.recorder.Event(ref, v1.EventTypeWarning, reason, message) } var errNoPodOnContainer = fmt.Errorf("no pod information labels on Docker container") // containerAndPodFromLabels tries to load the appropriate container info off of a Docker container's labels -func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod, container *api.Container, err error) { +func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *v1.Pod, container *v1.Container, err error) { if inspect == nil || inspect.Config == nil || inspect.Config.Labels == nil { return nil, nil, errNoPodOnContainer } @@ -1640,7 +1641,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod // the pod data may not be set if body, found := labels[kubernetesPodLabel]; found { - pod = &api.Pod{} + pod = &v1.Pod{} if err = kruntime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(body), pod); err == nil { name := labels[types.KubernetesContainerNameLabel] for ix := range pod.Spec.Containers { @@ -1670,7 +1671,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod if pod == nil { if period, ok := labels[kubernetesPodTerminationGracePeriodLabel]; ok { if seconds, err := strconv.ParseInt(period, 10, 64); err == nil { - pod = &api.Pod{} + pod = &v1.Pod{} pod.DeletionGracePeriodSeconds = &seconds } } @@ -1679,7 +1680,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod return } -func (dm *DockerManager) applyOOMScoreAdj(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error { +func (dm *DockerManager) applyOOMScoreAdj(pod *v1.Pod, container *v1.Container, containerInfo *dockertypes.ContainerJSON) error { if 
containerInfo.State.Pid == 0 { // Container exited. We cannot do anything about it. Ignore this error. glog.V(2).Infof("Failed to apply OOM score adj on container %q with ID %q. Init process does not exist.", containerInfo.Name, containerInfo.ID) @@ -1709,7 +1710,7 @@ func (dm *DockerManager) applyOOMScoreAdj(pod *api.Pod, container *api.Container // Run a single container from a pod. Returns the docker container ID // If do not need to pass labels, just pass nil. -func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode, podIP string, restartCount int) (kubecontainer.ContainerID, error) { +func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP string, restartCount int) (kubecontainer.ContainerID, error) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start)) @@ -1790,7 +1791,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe return id, err } -func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error { +func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *v1.Pod, container *v1.Container, containerInfo *dockertypes.ContainerJSON) error { // Compare current API version with expected api version. result, err := dm.checkDockerAPIVersion(dockerV110APIVersion) if err != nil { @@ -1806,7 +1807,7 @@ func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *api.Pod, container *api.C return nil } -func (dm *DockerManager) calculateOomScoreAdj(pod *api.Pod, container *api.Container) int { +func (dm *DockerManager) calculateOomScoreAdj(pod *v1.Pod, container *v1.Container) int { // Set OOM score of the container based on the priority of the container. // Processes in lower-priority pods should be killed first if the system runs out of memory. // The main pod infrastructure container is considered high priority, since if it is killed the @@ -1880,14 +1881,14 @@ func appendToFile(filePath, stringToAppend string) error { // createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container. // If any error occurs in this function, it will return a brief error and a detailed error message. -func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.DockerID, error, string) { +func (dm *DockerManager) createPodInfraContainer(pod *v1.Pod) (kubecontainer.DockerID, error, string) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("createPodInfraContainer").Observe(metrics.SinceInMicroseconds(start)) }() // Use host networking if specified. 
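// Illustration only: in the v1 types the host-namespace switches sit directly on the
// PodSpec, which is why the hunks in this file read pod.Spec.HostNetwork, HostPID and
// HostIPC rather than going through pod.Spec.SecurityContext as the internal API did.
pod := &v1.Pod{
	Spec: v1.PodSpec{
		HostNetwork: true,
		Containers:  []v1.Container{{Name: "app", Image: "example.com/app:v1"}},
	},
}
// kubecontainer.IsHostNetworkPod(pod) is true, so createPodInfraContainer below runs
// the infra container in the host network namespace.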
netNamespace := "" - var ports []api.ContainerPort + var ports []v1.ContainerPort if kubecontainer.IsHostNetworkPod(pod) { netNamespace = namespaceModeHost @@ -1904,7 +1905,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do } } - container := &api.Container{ + container := &v1.Container{ Name: PodInfraContainerName, Image: dm.podInfraContainerImage, Ports: ports, @@ -1948,7 +1949,7 @@ type podContainerChangesSpec struct { ContainersToKeep map[kubecontainer.DockerID]int } -func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) { +func (dm *DockerManager) computePodContainerChanges(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start)) @@ -2031,7 +2032,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub // If we're creating infra container everything will be killed anyway // If RestartPolicy is Always or OnFailure we restart containers that were running before we // killed them when restarting Infra Container. - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("Infra Container is being recreated. %q will be restarted.", container.Name) glog.V(1).Info(message) containersToStart[index] = message @@ -2044,7 +2045,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub // If we have an initialization failure everything will be killed anyway // If RestartPolicy is Always or OnFailure we restart containers that were running before we // killed them when re-running initialization - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name) glog.V(1).Info(message) containersToStart[index] = message @@ -2069,7 +2070,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub containersToKeep[containerID] = index continue } - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name) glog.Info(message) containersToStart[index] = message @@ -2100,7 +2101,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub } // Sync the running pod to match the specified desired pod. 
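// Illustration only: a small helper mirroring the restart decisions taken in
// computePodContainerChanges above, assuming the container in question has exited.
func shouldRestartExited(pod *v1.Pod, exitedCleanly bool) bool {
	switch pod.Spec.RestartPolicy {
	case v1.RestartPolicyNever:
		return false
	case v1.RestartPolicyOnFailure:
		return !exitedCleanly // only failed containers are restarted
	default: // v1.RestartPolicyAlways
		return true
	}
}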
-func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { +func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start)) @@ -2114,7 +2115,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges) if containerChanges.InfraChanged { - dm.recorder.Eventf(pod, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.") + dm.recorder.Eventf(pod, v1.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.") } if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) { if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 { @@ -2139,7 +2140,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec if !keep && !keepInit { glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod)) // attempt to find the appropriate container policy - var podContainer *api.Container + var podContainer *v1.Container var killMessage string for i, c := range pod.Spec.Containers { if c.Name == containerStatus.Name { @@ -2244,7 +2245,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name) initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode)) result.AddSyncResult(initContainerResult) - if pod.Spec.RestartPolicy == api.RestartPolicyNever { + if pod.Spec.RestartPolicy == v1.RestartPolicyNever { utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status)) return } @@ -2330,7 +2331,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec // tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start // was not successful. -func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) { +func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) { err, msg := dm.imagePuller.EnsureImageExists(pod, container, pullSecrets) if err != nil { return err, msg @@ -2368,7 +2369,7 @@ func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Po // pruneInitContainers ensures that before we begin creating init containers, we have reduced the number // of outstanding init containers still present. This reduces load on the container garbage collector // by only preserving the most recent terminated init container. 
-func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) { +func (dm *DockerManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) { // only the last execution of each init container should be preserved, and only preserve it if it is in the // list of init containers to keep. initContainerNames := sets.NewString() @@ -2417,7 +2418,7 @@ func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus // findActiveInitContainer returns the status of the last failed container, the next init container to // start, or done if there are no further init containers. Status is only returned if an init container // failed, in which case next will point to the current container. -func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (next *api.Container, status *kubecontainer.ContainerStatus, done bool) { +func findActiveInitContainer(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (next *v1.Container, status *kubecontainer.ContainerStatus, done bool) { if len(pod.Spec.InitContainers) == 0 { return nil, nil, true } @@ -2449,7 +2450,7 @@ func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) ( } // verifyNonRoot returns an error if the container or image will run as the root user. -func (dm *DockerManager) verifyNonRoot(container *api.Container) error { +func (dm *DockerManager) verifyNonRoot(container *v1.Container) error { if securitycontext.HasRunAsUser(container) { if securitycontext.HasRootRunAsUser(container) { return fmt.Errorf("container's runAsUser breaks non-root policy") @@ -2510,7 +2511,7 @@ func GetUserFromImageUser(id string) string { // If all instances of a container are garbage collected, doBackOff will also return false, which means the container may be restarted before the // backoff deadline. However, because that won't cause error and the chance is really slim, we can just ignore it for now. // If a container is still in backoff, the function will return a brief backoff error and a detailed error message. -func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) { +func (dm *DockerManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) { var cStatus *kubecontainer.ContainerStatus // Use the finished time of the latest exited container as the start point to calculate whether to do back-off. // TODO(random-liu): Better define backoff start point; add unit and e2e test after we finalize this. 
(See github issue #22240) @@ -2532,7 +2533,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt stableName, _, _ := BuildDockerName(dockerName, container) if backOff.IsInBackOffSince(stableName, ts) { if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil { - dm.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed docker container") + dm.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed docker container") } err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod)) glog.Infof("%s", err.Error()) @@ -2544,18 +2545,18 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt } // getPidMode returns the pid mode to use on the docker container based on pod.Spec.HostPID. -func getPidMode(pod *api.Pod) string { +func getPidMode(pod *v1.Pod) string { pidMode := "" - if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostPID { + if pod.Spec.HostPID { pidMode = namespaceModeHost } return pidMode } // getIPCMode returns the ipc mode to use on the docker container based on pod.Spec.HostIPC. -func getIPCMode(pod *api.Pod) string { +func getIPCMode(pod *v1.Pod) string { ipcMode := "" - if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostIPC { + if pod.Spec.HostIPC { ipcMode = namespaceModeHost } return ipcMode diff --git a/pkg/kubelet/dockertools/docker_manager_linux.go b/pkg/kubelet/dockertools/docker_manager_linux.go index 2a520468578..c806436c653 100644 --- a/pkg/kubelet/dockertools/docker_manager_linux.go +++ b/pkg/kubelet/dockertools/docker_manager_linux.go @@ -20,7 +20,7 @@ package dockertools import ( dockertypes "github.com/docker/engine-api/types" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func getContainerIP(container *dockertypes.ContainerJSON) string { @@ -45,7 +45,7 @@ func containerProvidesPodIP(name *KubeletContainerName) bool { } // Returns Seccomp and AppArmor Security options -func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) { +func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) { var securityOpts []dockerOpt if seccompOpts, err := dm.getSeccompOpts(pod, ctrName); err != nil { return nil, err diff --git a/pkg/kubelet/dockertools/docker_manager_test.go b/pkg/kubelet/dockertools/docker_manager_test.go index 62263bbb96f..b08fece458d 100644 --- a/pkg/kubelet/dockertools/docker_manager_test.go +++ b/pkg/kubelet/dockertools/docker_manager_test.go @@ -38,8 +38,8 @@ import ( "github.com/golang/mock/gomock" cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -80,7 +80,7 @@ var _ kubecontainer.RuntimeHelper = &fakeRuntimeHelper{} var testPodContainerDir string -func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { +func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { var opts kubecontainer.RunContainerOptions var err error if 
len(container.TerminationMessagePath) != 0 { @@ -93,12 +93,12 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container return &opts, nil } -func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { +func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) { return nil, nil, fmt.Errorf("not implemented") } // This is not used by docker runtime. -func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { +func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { return "", "", nil } @@ -106,7 +106,7 @@ func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string { return "" } -func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 { +func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { return nil } @@ -116,7 +116,7 @@ func newFakeImageManager() images.ImageManager { return &fakeImageManager{} } -func (m *fakeImageManager) EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) { +func (m *fakeImageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) { return nil, "" } @@ -187,20 +187,20 @@ func matchString(t *testing.T, pattern, str string) bool { func TestSetEntrypointAndCommand(t *testing.T) { cases := []struct { name string - container *api.Container + container *v1.Container envs []kubecontainer.EnvVar expected *dockertypes.ContainerCreateConfig }{ { name: "none", - container: &api.Container{}, + container: &v1.Container{}, expected: &dockertypes.ContainerCreateConfig{ Config: &dockercontainer.Config{}, }, }, { name: "command", - container: &api.Container{ + container: &v1.Container{ Command: []string{"foo", "bar"}, }, expected: &dockertypes.ContainerCreateConfig{ @@ -211,7 +211,7 @@ func TestSetEntrypointAndCommand(t *testing.T) { }, { name: "command expanded", - container: &api.Container{ + container: &v1.Container{ Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, envs: []kubecontainer.EnvVar{ @@ -232,7 +232,7 @@ func TestSetEntrypointAndCommand(t *testing.T) { }, { name: "args", - container: &api.Container{ + container: &v1.Container{ Args: []string{"foo", "bar"}, }, expected: &dockertypes.ContainerCreateConfig{ @@ -243,7 +243,7 @@ func TestSetEntrypointAndCommand(t *testing.T) { }, { name: "args expanded", - container: &api.Container{ + container: &v1.Container{ Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, envs: []kubecontainer.EnvVar{ @@ -264,7 +264,7 @@ func TestSetEntrypointAndCommand(t *testing.T) { }, { name: "both", - container: &api.Container{ + container: &v1.Container{ Command: []string{"foo"}, Args: []string{"bar", "baz"}, }, @@ -277,7 +277,7 @@ func TestSetEntrypointAndCommand(t *testing.T) { }, { name: "both expanded", - container: &api.Container{ + container: &v1.Container{ Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"}, Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, }, @@ -314,10 +314,10 @@ func TestSetEntrypointAndCommand(t *testing.T) { } setEntrypointAndCommand(tc.container, opts, actualOpts) - if e, a := tc.expected.Config.Entrypoint, actualOpts.Config.Entrypoint; !api.Semantic.DeepEqual(e, a) { + if e, a := tc.expected.Config.Entrypoint, actualOpts.Config.Entrypoint; !v1.Semantic.DeepEqual(e, a) { t.Errorf("%v: unexpected entrypoint: expected %v, got %v", tc.name, e, a) } - if e, a := tc.expected.Config.Cmd, 
actualOpts.Config.Cmd; !api.Semantic.DeepEqual(e, a) { + if e, a := tc.expected.Config.Cmd, actualOpts.Config.Cmd; !v1.Semantic.DeepEqual(e, a) { t.Errorf("%v: unexpected command: expected %v, got %v", tc.name, e, a) } } @@ -477,13 +477,13 @@ func TestKillContainerInPodWithPreStop(t *testing.T) { ExitCode: 0, } expectedCmd := []string{"foo.sh", "bar"} - pod := makePod("qux", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("qux", &v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo", - Lifecycle: &api.Lifecycle{ - PreStop: &api.Handler{ - Exec: &api.ExecAction{ + Lifecycle: &v1.Lifecycle{ + PreStop: &v1.Handler{ + Exec: &v1.ExecAction{ Command: expectedCmd, }, }, @@ -558,15 +558,15 @@ func TestIsAExitError(t *testing.T) { } } -func generatePodInfraContainerHash(pod *api.Pod) uint64 { - var ports []api.ContainerPort - if pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork { +func generatePodInfraContainerHash(pod *v1.Pod) uint64 { + var ports []v1.ContainerPort + if pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork { for _, container := range pod.Spec.Containers { ports = append(ports, container.Ports...) } } - container := &api.Container{ + container := &v1.Container{ Name: PodInfraContainerName, Image: "", Ports: ports, @@ -577,7 +577,7 @@ func generatePodInfraContainerHash(pod *api.Pod) uint64 { // runSyncPod is a helper function to retrieve the running pods from the fake // docker client and runs SyncPod for the given pod. -func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult { +func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *v1.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult { podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { t.Errorf("unexpected error: %v", err) @@ -586,8 +586,8 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p if backOff == nil { backOff = flowcontrol.NewBackOff(time.Second, time.Minute) } - // api.PodStatus is not used in SyncPod now, pass in an empty one. - result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) + // v1.PodStatus is not used in SyncPod now, pass in an empty one. 
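// Illustration only: the backoff bookkeeping used by doBackOff and runSyncPod above;
// "stableName" (the per-container key) and ts (the last exit time) are assumed here.
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
if backOff.IsInBackOffSince(stableName, ts) {
	// Still inside the backoff window: report the remaining delay instead of restarting.
	glog.Infof("back-off %s restarting failed container %s", backOff.Get(stableName), stableName)
} else {
	backOff.Next(stableName, ts) // grow the delay ahead of the next failure
}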
+ result := dm.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff) err = result.Error() if err != nil && !expectErr { t.Errorf("unexpected error: %v", err) @@ -601,8 +601,8 @@ func TestSyncPodCreateNetAndContainer(t *testing.T) { dm, fakeDocker := newTestDockerManager() dm.podInfraContainerImage = "pod_infra_image" - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -641,8 +641,8 @@ func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) { puller := dm.dockerPuller.(*FakeDockerPuller) puller.HasImages = []string{} dm.podInfraContainerImage = "foo/infra_image:v1" - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar", Image: "foo/something:v0", ImagePullPolicy: "IfNotPresent"}, }, }) @@ -672,8 +672,8 @@ func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) { func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) { dm, fakeDocker := newTestDockerManager() - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -700,8 +700,8 @@ func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) { func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) { dm, fakeDocker := newTestDockerManager() - pod := makePod("foo1", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo1", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar1"}, }, }) @@ -735,8 +735,8 @@ func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) { func TestSyncPodDeletesDuplicate(t *testing.T) { dm, fakeDocker := newTestDockerManager() - pod := makePod("bar", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("bar", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, }) @@ -769,8 +769,8 @@ func TestSyncPodDeletesDuplicate(t *testing.T) { func TestSyncPodBadHash(t *testing.T) { dm, fakeDocker := newTestDockerManager() - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -802,8 +802,8 @@ func TestSyncPodsUnhealthy(t *testing.T) { infraContainerID = "9876" ) dm, fakeDocker := newTestDockerManager() - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{{Name: "unhealthy"}}, + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{{Name: "unhealthy"}}, }) fakeDocker.SetFakeRunningContainers([]*FakeContainer{ @@ -833,9 +833,9 @@ func TestSyncPodsUnhealthy(t *testing.T) { func TestSyncPodsDoesNothing(t *testing.T) { dm, fakeDocker := newTestDockerManager() - container := api.Container{Name: "bar"} - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + container := v1.Container{Name: "bar"} + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ container, }, }) @@ -856,11 +856,11 @@ func TestSyncPodsDoesNothing(t *testing.T) { func TestSyncPodWithRestartPolicy(t *testing.T) { dm, fakeDocker := newTestDockerManager() - containers := []api.Container{ + containers := []v1.Container{ {Name: "succeeded"}, {Name: "failed"}, } - pod := makePod("foo", &api.PodSpec{ + pod := makePod("foo", &v1.PodSpec{ Containers: containers, }) dockerContainers := []*FakeContainer{ @@ -886,13 +886,13 @@ func TestSyncPodWithRestartPolicy(t *testing.T) { }} tests := []struct { - policy api.RestartPolicy + policy v1.RestartPolicy calls 
[]string created []string stopped []string }{ { - api.RestartPolicyAlways, + v1.RestartPolicyAlways, []string{ // Restart both containers. "create", "start", "inspect_container", "create", "start", "inspect_container", @@ -901,7 +901,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) { []string{}, }, { - api.RestartPolicyOnFailure, + v1.RestartPolicyOnFailure, []string{ // Restart the failed container. "create", "start", "inspect_container", @@ -910,7 +910,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) { []string{}, }, { - api.RestartPolicyNever, + v1.RestartPolicyNever, []string{ // Check the pod infra container. "inspect_container", "inspect_container", @@ -943,11 +943,11 @@ func TestSyncPodBackoff(t *testing.T) { startTime := fakeClock.Now() dm, fakeDocker := newTestDockerManager() - containers := []api.Container{ + containers := []v1.Container{ {Name: "good"}, {Name: "bad"}, } - pod := makePod("podfoo", &api.PodSpec{ + pod := makePod("podfoo", &v1.PodSpec{ Containers: containers, }) @@ -1027,14 +1027,14 @@ func TestSyncPodBackoff(t *testing.T) { func TestGetRestartCount(t *testing.T) { dm, fakeDocker := newTestDockerManager() containerName := "bar" - pod := *makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := *makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: containerName}, }, RestartPolicy: "Always", }) - pod.Status = api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + pod.Status = v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ { Name: containerName, RestartCount: 3, @@ -1043,7 +1043,7 @@ func TestGetRestartCount(t *testing.T) { } // Helper function for verifying the restart count. - verifyRestartCount := func(pod *api.Pod, expectedCount int) { + verifyRestartCount := func(pod *v1.Pod, expectedCount int) { runSyncPod(t, dm, fakeDocker, pod, nil, false) status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { @@ -1059,7 +1059,7 @@ func TestGetRestartCount(t *testing.T) { } } - killOneContainer := func(pod *api.Pod) { + killOneContainer := func(pod *v1.Pod) { status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { t.Fatalf("unexpected error %v", err) @@ -1108,13 +1108,13 @@ func TestGetRestartCount(t *testing.T) { func TestGetTerminationMessagePath(t *testing.T) { dm, fakeDocker := newTestDockerManager() - containers := []api.Container{ + containers := []v1.Container{ { Name: "bar", TerminationMessagePath: "/dev/somepath", }, } - pod := makePod("foo", &api.PodSpec{ + pod := makePod("foo", &v1.PodSpec{ Containers: containers, }) @@ -1140,13 +1140,13 @@ func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) { fakeHTTPClient := &fakeHTTP{} dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient) - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ { Name: "bar", - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - HTTPGet: &api.HTTPGetAction{ + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Host: "foo", Port: intstr.FromInt(8080), Path: "bar", @@ -1183,12 +1183,12 @@ func TestSyncPodEventHandlerFails(t *testing.T) { fakeHTTPClient := &fakeHTTP{err: fmt.Errorf("test error")} dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient) - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar", - Lifecycle: &api.Lifecycle{ - PostStart: 
&api.Handler{ - HTTPGet: &api.HTTPGetAction{ + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Host: "does.no.exist", Port: intstr.FromInt(8080), Path: "bar", @@ -1256,12 +1256,12 @@ func TestPortForwardNoSuchContainer(t *testing.T) { func TestSyncPodWithTerminationLog(t *testing.T) { dm, fakeDocker := newTestDockerManager() - container := api.Container{ + container := v1.Container{ Name: "bar", TerminationMessagePath: "/dev/somepath", } - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ container, }, }) @@ -1298,13 +1298,12 @@ func TestSyncPodWithTerminationLog(t *testing.T) { func TestSyncPodWithHostNetwork(t *testing.T) { dm, fakeDocker := newTestDockerManager() - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, - SecurityContext: &api.PodSecurityContext{ HostNetwork: true, - }, + }) runSyncPod(t, dm, fakeDocker, pod, nil, false) @@ -1342,20 +1341,20 @@ func TestVerifyNonRoot(t *testing.T) { var nonRootUid int64 = 1 tests := map[string]struct { - container *api.Container + container *v1.Container inspectImage *dockertypes.ImageInspect expectedError string }{ // success cases "non-root runAsUser": { - container: &api.Container{ - SecurityContext: &api.SecurityContext{ + container: &v1.Container{ + SecurityContext: &v1.SecurityContext{ RunAsUser: &nonRootUid, }, }, }, "numeric non-root image user": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{ Config: &dockercontainer.Config{ User: "1", @@ -1363,7 +1362,7 @@ func TestVerifyNonRoot(t *testing.T) { }, }, "numeric non-root image user with gid": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{ Config: &dockercontainer.Config{ User: "1:2", @@ -1373,15 +1372,15 @@ func TestVerifyNonRoot(t *testing.T) { // failure cases "root runAsUser": { - container: &api.Container{ - SecurityContext: &api.SecurityContext{ + container: &v1.Container{ + SecurityContext: &v1.SecurityContext{ RunAsUser: &rootUid, }, }, expectedError: "container's runAsUser breaks non-root policy", }, "non-numeric image user": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{ Config: &dockercontainer.Config{ User: "foo", @@ -1390,7 +1389,7 @@ func TestVerifyNonRoot(t *testing.T) { expectedError: "non-numeric user", }, "numeric root image user": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{ Config: &dockercontainer.Config{ User: "0", @@ -1399,7 +1398,7 @@ func TestVerifyNonRoot(t *testing.T) { expectedError: "container has no runAsUser and image will run as root", }, "numeric root image user with gid": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{ Config: &dockercontainer.Config{ User: "0:1", @@ -1408,12 +1407,12 @@ func TestVerifyNonRoot(t *testing.T) { expectedError: "container has no runAsUser and image will run as root", }, "nil image in inspect": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: nil, expectedError: "unable to inspect image", }, "nil config in image inspect": { - container: &api.Container{}, + container: &v1.Container{}, inspectImage: &dockertypes.ImageInspect{}, expectedError: "unable to inspect image", }, @@ -1471,7 +1470,7 @@ func 
TestGetUserFromImageUser(t *testing.T) { func TestGetPidMode(t *testing.T) { // test false - pod := &api.Pod{} + pod := &v1.Pod{} pidMode := getPidMode(pod) if pidMode != "" { @@ -1479,8 +1478,8 @@ func TestGetPidMode(t *testing.T) { } // test true - pod.Spec.SecurityContext = &api.PodSecurityContext{} - pod.Spec.SecurityContext.HostPID = true + pod.Spec.SecurityContext = &v1.PodSecurityContext{} + pod.Spec.HostPID = true pidMode = getPidMode(pod) if pidMode != "host" { t.Errorf("expected host pid mode for pod but got %v", pidMode) @@ -1489,7 +1488,7 @@ func TestGetPidMode(t *testing.T) { func TestGetIPCMode(t *testing.T) { // test false - pod := &api.Pod{} + pod := &v1.Pod{} ipcMode := getIPCMode(pod) if ipcMode != "" { @@ -1497,8 +1496,8 @@ func TestGetIPCMode(t *testing.T) { } // test true - pod.Spec.SecurityContext = &api.PodSecurityContext{} - pod.Spec.SecurityContext.HostIPC = true + pod.Spec.SecurityContext = &v1.PodSecurityContext{} + pod.Spec.HostIPC = true ipcMode = getIPCMode(pod) if ipcMode != "host" { t.Errorf("expected host ipc mode for pod but got %v", ipcMode) @@ -1511,13 +1510,13 @@ func TestSyncPodWithPullPolicy(t *testing.T) { puller.HasImages = []string{"foo/existing_one:v1", "foo/want:latest"} dm.podInfraContainerImage = "foo/infra_image:v1" - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ - {Name: "bar", Image: "foo/pull_always_image:v1", ImagePullPolicy: api.PullAlways}, - {Name: "bar2", Image: "foo/pull_if_not_present_image:v1", ImagePullPolicy: api.PullIfNotPresent}, - {Name: "bar3", Image: "foo/existing_one:v1", ImagePullPolicy: api.PullIfNotPresent}, - {Name: "bar4", Image: "foo/want:latest", ImagePullPolicy: api.PullIfNotPresent}, - {Name: "bar5", Image: "foo/pull_never_image:v1", ImagePullPolicy: api.PullNever}, + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ + {Name: "bar", Image: "foo/pull_always_image:v1", ImagePullPolicy: v1.PullAlways}, + {Name: "bar2", Image: "foo/pull_if_not_present_image:v1", ImagePullPolicy: v1.PullIfNotPresent}, + {Name: "bar3", Image: "foo/existing_one:v1", ImagePullPolicy: v1.PullIfNotPresent}, + {Name: "bar4", Image: "foo/want:latest", ImagePullPolicy: v1.PullIfNotPresent}, + {Name: "bar5", Image: "foo/pull_never_image:v1", ImagePullPolicy: v1.PullNever}, }, }) @@ -1555,25 +1554,25 @@ func TestSyncPodWithPullPolicy(t *testing.T) { func TestSyncPodWithFailure(t *testing.T) { pod := makePod("foo", nil) tests := map[string]struct { - container api.Container + container v1.Container dockerError map[string]error pullerError []error expected []*kubecontainer.SyncResult }{ "PullImageFailure": { - api.Container{Name: "bar", Image: "foo/real_image:v1", ImagePullPolicy: api.PullAlways}, + v1.Container{Name: "bar", Image: "foo/real_image:v1", ImagePullPolicy: v1.PullAlways}, map[string]error{}, []error{fmt.Errorf("can't pull image")}, []*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", images.ErrImagePull, "can't pull image"}}, }, "CreateContainerFailure": { - api.Container{Name: "bar", Image: "foo/already_present:v2"}, + v1.Container{Name: "bar", Image: "foo/already_present:v2"}, map[string]error{"create": fmt.Errorf("can't create container")}, []error{}, []*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't create container"}}, }, "StartContainerFailure": { - api.Container{Name: "bar", Image: "foo/already_present:v2"}, + v1.Container{Name: "bar", Image: "foo/already_present:v2"}, map[string]error{"start": fmt.Errorf("can't start 
container")}, []error{}, []*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't start container"}}, @@ -1592,7 +1591,7 @@ func TestSyncPodWithFailure(t *testing.T) { }}) fakeDocker.InjectErrors(test.dockerError) puller.ErrorsToInject = test.pullerError - pod.Spec.Containers = []api.Container{test.container} + pod.Spec.Containers = []v1.Container{test.container} result := runSyncPod(t, dm, fakeDocker, pod, nil, true) verifySyncResults(t, test.expected, result) } @@ -1658,9 +1657,9 @@ func TestSecurityOptsOperator(t *testing.T) { func TestGetSecurityOpts(t *testing.T) { const containerName = "bar" - pod := func(annotations map[string]string) *api.Pod { - p := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := func(annotations map[string]string) *v1.Pod { + p := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: containerName}, }, }) @@ -1670,7 +1669,7 @@ func TestGetSecurityOpts(t *testing.T) { tests := []struct { msg string - pod *api.Pod + pod *v1.Pod expectedOpts []string }{{ msg: "No security annotations", @@ -1679,7 +1678,7 @@ func TestGetSecurityOpts(t *testing.T) { }, { msg: "Seccomp default", pod: pod(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", + v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", }), expectedOpts: nil, }, { @@ -1697,8 +1696,8 @@ func TestGetSecurityOpts(t *testing.T) { }, { msg: "AppArmor and seccomp profile", pod: pod(map[string]string{ - api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", - apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo", + v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default", + apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo", }), expectedOpts: []string{"apparmor=foo"}, }} @@ -1722,8 +1721,8 @@ func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) { recorder := record.NewFakeRecorder(20) dm.recorder = recorder - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -1752,19 +1751,19 @@ func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) { assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions >= 1.10 must not have seccomp disabled by default") cid := utilstrings.ShortenString(fakeDocker.Created[1], 12) - assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer, + assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined]", cid))) } func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) { dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22") - pod := makePod("foo4", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo4", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar4"}, }, }) pod.Annotations = map[string]string{ - api.SeccompPodAnnotationKey: "unconfined", + v1.SeccompPodAnnotationKey: "unconfined", } runSyncPod(t, dm, fakeDocker, pod, nil, false) @@ -1793,13 +1792,13 @@ func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) { func TestDefaultSeccompProfileWithDockerV110(t *testing.T) { dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22") - pod := makePod("foo1", &api.PodSpec{ - Containers: 
[]api.Container{ + pod := makePod("foo1", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar1"}, }, }) pod.Annotations = map[string]string{ - api.SeccompPodAnnotationKey: "docker/default", + v1.SeccompPodAnnotationKey: "docker/default", } runSyncPod(t, dm, fakeDocker, pod, nil, false) @@ -1828,14 +1827,14 @@ func TestDefaultSeccompProfileWithDockerV110(t *testing.T) { func TestSeccompContainerAnnotationTrumpsPod(t *testing.T) { dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22") - pod := makePod("foo2", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo2", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar2"}, }, }) pod.Annotations = map[string]string{ - api.SeccompPodAnnotationKey: "unconfined", - api.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default", + v1.SeccompPodAnnotationKey: "unconfined", + v1.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default", } runSyncPod(t, dm, fakeDocker, pod, nil, false) @@ -1871,21 +1870,21 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) { }{ { annotations: map[string]string{ - api.SeccompPodAnnotationKey: "localhost/test", + v1.SeccompPodAnnotationKey: "localhost/test", }, expectedSecOpt: `seccomp={"foo":"bar"}`, expectedSecMsg: "seccomp=test(md5:21aeae45053385adebd25311f9dd9cb1)", }, { annotations: map[string]string{ - api.SeccompPodAnnotationKey: "localhost/sub/subtest", + v1.SeccompPodAnnotationKey: "localhost/sub/subtest", }, expectedSecOpt: `seccomp={"abc":"def"}`, expectedSecMsg: "seccomp=sub/subtest(md5:07c9bcb4db631f7ca191d6e0bca49f76)", }, { annotations: map[string]string{ - api.SeccompPodAnnotationKey: "localhost/not-existing", + v1.SeccompPodAnnotationKey: "localhost/not-existing", }, expectedError: "cannot load seccomp profile", }, @@ -1900,8 +1899,8 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) { _, filename, _, _ := goruntime.Caller(0) dm.seccompProfileRoot = path.Join(path.Dir(filename), "fixtures", "seccomp") - pod := makePod("foo2", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo2", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar2"}, }, }) @@ -1935,7 +1934,7 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) { assert.Contains(t, newContainer.HostConfig.SecurityOpt, test.expectedSecOpt, "The compacted seccomp json profile should be loaded.") cid := utilstrings.ShortenString(fakeDocker.Created[1], 12) - assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer, + assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container with docker id %s; Security:[%s]", cid, test.expectedSecMsg)), "testcase %d", i) } @@ -1943,8 +1942,8 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) { func TestSecurityOptsAreNilWithDockerV19(t *testing.T) { dm, fakeDocker := newTestDockerManagerWithVersion("1.9.1", "1.21") - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -2011,8 +2010,8 @@ func TestCreateAppArmorContanier(t *testing.T) { recorder := record.NewFakeRecorder(20) dm.recorder = recorder - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", @@ -2020,8 +2019,8 @@ func TestCreateAppArmorContanier(t *testing.T) { apparmor.ContainerAnnotationKeyPrefix + "test": apparmor.ProfileNamePrefix + "test-profile", }, }, - Spec: api.PodSpec{ - Containers: []api.Container{ 
+ Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "test"}, }, }, @@ -2053,7 +2052,7 @@ func TestCreateAppArmorContanier(t *testing.T) { assert.Contains(t, securityOpts, "apparmor=test-profile", "Container should have apparmor security opt") cid := utilstrings.ShortenString(fakeDocker.Created[1], 12) - assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer, + assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined apparmor=test-profile]", cid))) } @@ -2154,8 +2153,8 @@ func TestGetPodStatusNoSuchContainer(t *testing.T) { infraContainerID = "9876" ) dm, fakeDocker := newTestDockerManager() - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{{Name: "nosuchcontainer"}}, + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{{Name: "nosuchcontainer"}}, }) fakeDocker.SetFakeContainers([]*FakeContainer{ @@ -2191,8 +2190,8 @@ func TestGetPodStatusNoSuchContainer(t *testing.T) { func TestPruneInitContainers(t *testing.T) { dm, fake := newTestDockerManager() - pod := makePod("", &api.PodSpec{ - InitContainers: []api.Container{ + pod := makePod("", &v1.PodSpec{ + InitContainers: []v1.Container{ {Name: "init1"}, {Name: "init2"}, }, @@ -2223,7 +2222,7 @@ func TestPruneInitContainers(t *testing.T) { func TestGetPodStatusFromNetworkPlugin(t *testing.T) { cases := []struct { - pod *api.Pod + pod *v1.Pod fakePodIP string containerID string infraContainerID string @@ -2232,14 +2231,14 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) { expectUnknown bool }{ { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "container"}}, }, }, fakePodIP: "10.10.10.10", @@ -2250,14 +2249,14 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) { expectUnknown: false, }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "container"}}, }, }, fakePodIP: "", @@ -2335,8 +2334,8 @@ func TestSyncPodGetsPodIPFromNetworkPlugin(t *testing.T) { fnp := mock_network.NewMockNetworkPlugin(ctrl) dm.networkPlugin = fnp - pod := makePod("foo", &api.PodSpec{ - Containers: []api.Container{ + pod := makePod("foo", &v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }) @@ -2387,12 +2386,12 @@ func TestContainerAndPodFromLabels(t *testing.T) { } } -func makePod(name string, spec *api.PodSpec) *api.Pod { +func makePod(name string, spec *v1.PodSpec) *v1.Pod { if spec == nil { - spec = &api.PodSpec{Containers: []api.Container{{Name: "foo"}, {Name: "bar"}}} + spec = &v1.PodSpec{Containers: []v1.Container{{Name: "foo"}, {Name: "bar"}}} } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: name, Namespace: "new", diff --git a/pkg/kubelet/dockertools/docker_manager_unsupported.go b/pkg/kubelet/dockertools/docker_manager_unsupported.go index 8182784a161..be8e48f24d7 100644 --- a/pkg/kubelet/dockertools/docker_manager_unsupported.go +++ b/pkg/kubelet/dockertools/docker_manager_unsupported.go @@ -19,7 +19,7 @@ limitations under the License. 
package dockertools import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" dockertypes "github.com/docker/engine-api/types" ) @@ -37,6 +37,6 @@ func containerProvidesPodIP(name *KubeletContainerName) bool { } // Returns nil as both Seccomp and AppArmor security options are not valid on Windows -func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) { +func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) { return nil, nil } diff --git a/pkg/kubelet/dockertools/docker_manager_windows.go b/pkg/kubelet/dockertools/docker_manager_windows.go index cb32cc7aa72..5e1842302af 100644 --- a/pkg/kubelet/dockertools/docker_manager_windows.go +++ b/pkg/kubelet/dockertools/docker_manager_windows.go @@ -21,7 +21,7 @@ package dockertools import ( "os" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" dockertypes "github.com/docker/engine-api/types" ) @@ -53,6 +53,6 @@ func containerProvidesPodIP(name *KubeletContainerName) bool { } // Returns nil as both Seccomp and AppArmor security options are not valid on Windows -func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) { +func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) { return nil, nil } diff --git a/pkg/kubelet/dockertools/docker_test.go b/pkg/kubelet/dockertools/docker_test.go index 0a0ba852738..7af13252226 100644 --- a/pkg/kubelet/dockertools/docker_test.go +++ b/pkg/kubelet/dockertools/docker_test.go @@ -33,7 +33,7 @@ import ( dockernat "github.com/docker/go-connections/nat" cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" @@ -118,7 +118,7 @@ func TestGetContainerID(t *testing.T) { } func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) { - container := &api.Container{Name: containerName} + container := &v1.Container{Name: containerName} hasher := adler32.New() hashutil.DeepHashObject(hasher, *container) computedHash := uint64(hasher.Sum32()) @@ -142,7 +142,7 @@ func TestContainerNaming(t *testing.T) { // No Container name verifyPackUnpack(t, "other", podUID, "name", "") - container := &api.Container{Name: "container"} + container := &v1.Container{Name: "container"} podName := "foo" podNamespace := "test" name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID) @@ -416,7 +416,7 @@ func TestPullWithNoSecrets(t *testing.T) { keyring: fakeKeyring, } - err := dp.Pull(test.imageName, []api.Secret{}) + err := dp.Pull(test.imageName, []v1.Secret{}) if err != nil { t.Errorf("unexpected non-nil err: %s", err) continue @@ -459,7 +459,7 @@ func TestPullWithJSONError(t *testing.T) { client: fakeClient, keyring: fakeKeyring, } - err := puller.Pull(test.imageName, []api.Secret{}) + err := puller.Pull(test.imageName, []v1.Secret{}) if err == nil || !strings.Contains(err.Error(), test.expectedError) { t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err) continue @@ -483,19 +483,19 @@ func TestPullWithSecrets(t *testing.T) { tests := map[string]struct { imageName string - passedSecrets []api.Secret + passedSecrets []v1.Secret builtInDockerConfig credentialprovider.DockerConfig expectedPulls []string }{ "no matching secrets": { "ubuntu", - []api.Secret{}, + []v1.Secret{}, 
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}), []string{"ubuntu using {}"}, }, "default keyring secrets": { "ubuntu", - []api.Secret{}, + []v1.Secret{}, credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil}, }), @@ -503,7 +503,7 @@ func TestPullWithSecrets(t *testing.T) { }, "default keyring secrets unused": { "ubuntu", - []api.Secret{}, + []v1.Secret{}, credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "extraneous": {Username: "built-in", Password: "password", Email: "email", Provider: nil}, }), @@ -511,7 +511,7 @@ func TestPullWithSecrets(t *testing.T) { }, "builtin keyring secrets, but use passed": { "ubuntu", - []api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}}, + []v1.Secret{{Type: v1.SecretTypeDockercfg, Data: map[string][]byte{v1.DockerConfigKey: dockercfgContent}}}, credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil}, }), @@ -519,7 +519,7 @@ func TestPullWithSecrets(t *testing.T) { }, "builtin keyring secrets, but use passed with new docker config": { "ubuntu", - []api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}}, + []v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJsonContent}}}, credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil}, }), @@ -564,7 +564,7 @@ func TestDockerKeyringLookupFails(t *testing.T) { keyring: fakeKeyring, } - err := dp.Pull("host/repository/image:version", []api.Secret{}) + err := dp.Pull("host/repository/image:version", []v1.Secret{}) if err == nil { t.Errorf("unexpected non-error") } @@ -908,7 +908,7 @@ func TestFindContainersByPod(t *testing.T) { } func TestMakePortsAndBindings(t *testing.T) { - portMapping := func(container, host int, protocol api.Protocol, ip string) kubecontainer.PortMapping { + portMapping := func(container, host int, protocol v1.Protocol, ip string) kubecontainer.PortMapping { return kubecontainer.PortMapping{ ContainerPort: container, HostPort: host, diff --git a/pkg/kubelet/dockertools/fake_docker_client.go b/pkg/kubelet/dockertools/fake_docker_client.go index 0c538c50e74..e26b4c2a084 100644 --- a/pkg/kubelet/dockertools/fake_docker_client.go +++ b/pkg/kubelet/dockertools/fake_docker_client.go @@ -30,7 +30,7 @@ import ( dockercontainer "github.com/docker/engine-api/types/container" "k8s.io/kubernetes/pkg/util/clock" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) type calledDetail struct { @@ -580,7 +580,7 @@ type FakeDockerPuller struct { } // Pull records the image pull attempt, and optionally injects an error. 
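As a rough, self-contained sketch of the fake-puller pattern these pull tests exercise — the `fakePuller` and `secret` types below are simplified stand-ins for `FakeDockerPuller` and `v1.Secret`, keeping only the `HasImages`/`ErrorsToInject`/`ImagesPulled` behaviour the tests above rely on:

```go
package main

import (
	"errors"
	"fmt"
)

// secret is a stand-in for v1.Secret; the real fake accepts []v1.Secret.
type secret struct{ Name string }

// fakePuller mirrors the shape of the fake used in the tests above:
// it records every requested image and optionally returns injected errors.
type fakePuller struct {
	HasImages      []string // images considered already present
	ErrorsToInject []error  // errors returned by successive Pull calls
	ImagesPulled   []string // record of pull attempts
}

func (f *fakePuller) Pull(image string, _ []secret) error {
	f.ImagesPulled = append(f.ImagesPulled, image)
	if len(f.ErrorsToInject) > 0 {
		err := f.ErrorsToInject[0]
		f.ErrorsToInject = f.ErrorsToInject[1:]
		return err
	}
	return nil
}

func (f *fakePuller) IsImagePresent(image string) bool {
	for _, img := range f.HasImages {
		if img == image {
			return true
		}
	}
	return false
}

func main() {
	p := &fakePuller{
		HasImages:      []string{"foo/existing_one:v1"},
		ErrorsToInject: []error{errors.New("can't pull image")},
	}
	fmt.Println(p.Pull("foo/pull_always_image:v1", nil)) // can't pull image
	fmt.Println(p.IsImagePresent("foo/existing_one:v1")) // true
	fmt.Println(p.ImagesPulled)                          // [foo/pull_always_image:v1]
}
```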
-func (f *FakeDockerPuller) Pull(image string, secrets []api.Secret) (err error) { +func (f *FakeDockerPuller) Pull(image string, secrets []v1.Secret) (err error) { f.Lock() defer f.Unlock() f.ImagesPulled = append(f.ImagesPulled, image) diff --git a/pkg/kubelet/dockertools/fake_manager.go b/pkg/kubelet/dockertools/fake_manager.go index 1bb5cf59b82..a9538e6abef 100644 --- a/pkg/kubelet/dockertools/fake_manager.go +++ b/pkg/kubelet/dockertools/fake_manager.go @@ -18,7 +18,7 @@ package dockertools import ( cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" @@ -65,14 +65,14 @@ func NewFakeDockerManager( } type fakePodGetter struct { - pods map[types.UID]*api.Pod + pods map[types.UID]*v1.Pod } func newFakePodGetter() *fakePodGetter { - return &fakePodGetter{make(map[types.UID]*api.Pod)} + return &fakePodGetter{make(map[types.UID]*v1.Pod)} } -func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) { +func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) { pod, found := f.pods[uid] return pod, found } diff --git a/pkg/kubelet/dockertools/kube_docker_client.go b/pkg/kubelet/dockertools/kube_docker_client.go index e6d140cf748..6322148d45f 100644 --- a/pkg/kubelet/dockertools/kube_docker_client.go +++ b/pkg/kubelet/dockertools/kube_docker_client.go @@ -161,7 +161,7 @@ func (d *kubeDockerClient) StartContainer(id string) error { return err } -// Stopping an already stopped container will not cause an error in engine-api. +// Stopping an already stopped container will not cause an error in engine-api. func (d *kubeDockerClient) StopContainer(id string, timeout int) error { ctx, cancel := d.getCustomTimeoutContext(time.Duration(timeout) * time.Second) defer cancel() diff --git a/pkg/kubelet/dockertools/labels.go b/pkg/kubelet/dockertools/labels.go index 5935e68e0b1..be8cfd9777c 100644 --- a/pkg/kubelet/dockertools/labels.go +++ b/pkg/kubelet/dockertools/labels.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/custommetrics" "k8s.io/kubernetes/pkg/kubelet/types" @@ -62,11 +63,11 @@ type labelledContainerInfo struct { Hash string RestartCount int TerminationMessagePath string - PreStopHandler *api.Handler - Ports []api.ContainerPort + PreStopHandler *v1.Handler + Ports []v1.ContainerPort } -func newLabels(container *api.Container, pod *api.Pod, restartCount int, enableCustomMetrics bool) map[string]string { +func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCustomMetrics bool) map[string]string { labels := map[string]string{} labels[types.KubernetesPodNameLabel] = pod.Name labels[types.KubernetesPodNamespaceLabel] = pod.Namespace @@ -128,13 +129,13 @@ func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil { logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err) } - preStopHandler := &api.Handler{} + preStopHandler := &v1.Handler{} if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil { logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err) } else if found {
containerInfo.PreStopHandler = preStopHandler } - containerPorts := []api.ContainerPort{} + containerPorts := []v1.ContainerPort{} if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPortsLabel, &containerPorts); err != nil { logError(containerInfo, kubernetesContainerPortsLabel, err) } else if found { @@ -192,7 +193,7 @@ func getJsonObjectFromLabel(labels map[string]string, label string, value interf return false, nil } -// The label kubernetesPodLabel is added a long time ago (#7421), it serialized the whole api.Pod to a docker label. +// The label kubernetesPodLabel is added a long time ago (#7421), it serialized the whole v1.Pod to a docker label. // We want to remove this label because it serialized too much useless information. However kubelet may still work // with old containers which only have this label for a long time until we completely deprecate the old label. // Before that to ensure correctness we have to supply information with the old labels when newly added labels @@ -200,15 +201,15 @@ func getJsonObjectFromLabel(labels map[string]string, label string, value interf // TODO(random-liu): Remove this function when we can completely remove label kubernetesPodLabel, probably after // dropping support for v1.1. func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *labelledContainerInfo) { - // Get api.Pod from old label - var pod *api.Pod + // Get v1.Pod from old label + var pod *v1.Pod data, found := labels[kubernetesPodLabel] if !found { // Don't report any error here, because it's normal that a container has no pod label, especially // when we gradually deprecate the old label return } - pod = &api.Pod{} + pod = &v1.Pod{} if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil { // If the pod label can't be parsed, we should report an error logError(containerInfo, kubernetesPodLabel, err) @@ -221,8 +222,8 @@ func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *la containerInfo.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds } - // Get api.Container from api.Pod - var container *api.Container + // Get v1.Container from v1.Pod + var container *v1.Container for i := range pod.Spec.Containers { if pod.Spec.Containers[i].Name == containerInfo.Name { container = &pod.Spec.Containers[i] diff --git a/pkg/kubelet/dockertools/labels_test.go b/pkg/kubelet/dockertools/labels_test.go index 7b763ed5383..86b97958642 100644 --- a/pkg/kubelet/dockertools/labels_test.go +++ b/pkg/kubelet/dockertools/labels_test.go @@ -21,8 +21,8 @@ import ( "strconv" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/runtime" @@ -33,52 +33,52 @@ func TestLabels(t *testing.T) { restartCount := 5 deletionGracePeriod := int64(10) terminationGracePeriod := int64(10) - lifecycle := &api.Lifecycle{ + lifecycle := &v1.Lifecycle{ // Left PostStart as nil - PreStop: &api.Handler{ - Exec: &api.ExecAction{ + PreStop: &v1.Handler{ + Exec: &v1.ExecAction{ Command: []string{"action1", "action2"}, }, - HTTPGet: &api.HTTPGetAction{ + HTTPGet: &v1.HTTPGetAction{ Path: "path", Host: "host", Port: intstr.FromInt(8080), Scheme: "scheme", }, - TCPSocket: &api.TCPSocketAction{ + TCPSocket: &v1.TCPSocketAction{ Port: intstr.FromString("80"), }, }, } - containerPorts := []api.ContainerPort{ + containerPorts := []v1.ContainerPort{ { Name: 
"http", HostPort: 80, ContainerPort: 8080, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }, { Name: "https", HostPort: 443, ContainerPort: 6443, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }, } - container := &api.Container{ + container := &v1.Container{ Name: "test_container", Ports: containerPorts, TerminationMessagePath: "/somepath", Lifecycle: lifecycle, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", DeletionGracePeriodSeconds: &deletionGracePeriod, }, - Spec: api.PodSpec{ - Containers: []api.Container{*container}, + Spec: v1.PodSpec{ + Containers: []v1.Container{*container}, TerminationGracePeriodSeconds: &terminationGracePeriod, }, } diff --git a/pkg/kubelet/envvars/envvars.go b/pkg/kubelet/envvars/envvars.go index 883acf61943..713480e162c 100644 --- a/pkg/kubelet/envvars/envvars.go +++ b/pkg/kubelet/envvars/envvars.go @@ -21,36 +21,36 @@ import ( "strconv" "strings" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // FromServices builds environment variables that a container is started with, // which tell the container where to find the services it may need, which are // provided as an argument. -func FromServices(services []*api.Service) []api.EnvVar { - var result []api.EnvVar +func FromServices(services []*v1.Service) []v1.EnvVar { + var result []v1.EnvVar for i := range services { service := services[i] // ignore services where ClusterIP is "None" or empty // the services passed to this method should be pre-filtered // only services that have the cluster IP set should be included here - if !api.IsServiceIPSet(service) { + if !v1.IsServiceIPSet(service) { continue } // Host name := makeEnvVariableName(service.Name) + "_SERVICE_HOST" - result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP}) + result = append(result, v1.EnvVar{Name: name, Value: service.Spec.ClusterIP}) // First port - give it the backwards-compatible name name = makeEnvVariableName(service.Name) + "_SERVICE_PORT" - result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))}) + result = append(result, v1.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))}) // All named ports (only the first may be unnamed, checked in validation) for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] if sp.Name != "" { pn := name + "_" + makeEnvVariableName(sp.Name) - result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))}) + result = append(result, v1.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))}) } } // Docker-compatible vars. @@ -67,25 +67,25 @@ func makeEnvVariableName(str string) string { return strings.ToUpper(strings.Replace(str, "-", "_", -1)) } -func makeLinkVariables(service *api.Service) []api.EnvVar { +func makeLinkVariables(service *v1.Service) []v1.EnvVar { prefix := makeEnvVariableName(service.Name) - all := []api.EnvVar{} + all := []v1.EnvVar{} for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] - protocol := string(api.ProtocolTCP) + protocol := string(v1.ProtocolTCP) if sp.Protocol != "" { protocol = string(sp.Protocol) } if i == 0 { // Docker special-cases the first port. 
- all = append(all, api.EnvVar{ + all = append(all, v1.EnvVar{ Name: prefix + "_PORT", Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port), }) } portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol)) - all = append(all, []api.EnvVar{ + all = append(all, []v1.EnvVar{ { Name: portPrefix, Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port), diff --git a/pkg/kubelet/envvars/envvars_test.go b/pkg/kubelet/envvars/envvars_test.go index 9a3876b6f88..1a580136097 100644 --- a/pkg/kubelet/envvars/envvars_test.go +++ b/pkg/kubelet/envvars/envvars_test.go @@ -20,67 +20,67 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/envvars" ) func TestFromServices(t *testing.T) { - sl := []*api.Service{ + sl := []*v1.Service{ { - ObjectMeta: api.ObjectMeta{Name: "foo-bar"}, - Spec: api.ServiceSpec{ + ObjectMeta: v1.ObjectMeta{Name: "foo-bar"}, + Spec: v1.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, ClusterIP: "1.2.3.4", - Ports: []api.ServicePort{ + Ports: []v1.ServicePort{ {Port: 8080, Protocol: "TCP"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "abc-123"}, - Spec: api.ServiceSpec{ + ObjectMeta: v1.ObjectMeta{Name: "abc-123"}, + Spec: v1.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, ClusterIP: "5.6.7.8", - Ports: []api.ServicePort{ + Ports: []v1.ServicePort{ {Name: "u-d-p", Port: 8081, Protocol: "UDP"}, {Name: "t-c-p", Port: 8081, Protocol: "TCP"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"}, - Spec: api.ServiceSpec{ + ObjectMeta: v1.ObjectMeta{Name: "q-u-u-x"}, + Spec: v1.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, ClusterIP: "9.8.7.6", - Ports: []api.ServicePort{ + Ports: []v1.ServicePort{ {Port: 8082, Protocol: "TCP"}, {Name: "8083", Port: 8083, Protocol: "TCP"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"}, - Spec: api.ServiceSpec{ + ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-none"}, + Spec: v1.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, ClusterIP: "None", - Ports: []api.ServicePort{ + Ports: []v1.ServicePort{ {Port: 8082, Protocol: "TCP"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"}, - Spec: api.ServiceSpec{ + ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-empty"}, + Spec: v1.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, ClusterIP: "", - Ports: []api.ServicePort{ + Ports: []v1.ServicePort{ {Port: 8082, Protocol: "TCP"}, }, }, }, } vars := envvars.FromServices(sl) - expected := []api.EnvVar{ + expected := []v1.EnvVar{ {Name: "FOO_BAR_SERVICE_HOST", Value: "1.2.3.4"}, {Name: "FOO_BAR_SERVICE_PORT", Value: "8080"}, {Name: "FOO_BAR_PORT", Value: "tcp://1.2.3.4:8080"}, diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 0e98156493b..58bf12cb1bb 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -23,8 +23,8 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -48,11 +48,11 @@ type managerImpl struct { // protects access to internal state sync.RWMutex // node conditions are the set of conditions present - nodeConditions []api.NodeConditionType + nodeConditions []v1.NodeConditionType // captures 
when a node condition was last observed based on a threshold being met nodeConditionsLastObservedAt nodeConditionsObservedAt // nodeRef is a reference to the node - nodeRef *api.ObjectReference + nodeRef *v1.ObjectReference // used to record events about the node recorder record.EventRecorder // used to measure usage stats on system @@ -62,9 +62,9 @@ type managerImpl struct { // records the set of thresholds that have been met (including graceperiod) but not yet resolved thresholdsMet []Threshold // resourceToRankFunc maps a resource to ranking function for that resource. - resourceToRankFunc map[api.ResourceName]rankFunc + resourceToRankFunc map[v1.ResourceName]rankFunc // resourceToNodeReclaimFuncs maps a resource to an ordered list of functions that know how to reclaim that resource. - resourceToNodeReclaimFuncs map[api.ResourceName]nodeReclaimFuncs + resourceToNodeReclaimFuncs map[v1.ResourceName]nodeReclaimFuncs // last observations from synchronize lastObservations signalObservations // notifiersInitialized indicates if the threshold notifiers have been initialized (i.e. synchronize() has been called once) @@ -81,7 +81,7 @@ func NewManager( killPodFunc KillPodFunc, imageGC ImageGC, recorder record.EventRecorder, - nodeRef *api.ObjectReference, + nodeRef *v1.ObjectReference, clock clock.Clock) (Manager, lifecycle.PodAdmitHandler, error) { manager := &managerImpl{ clock: clock, @@ -106,7 +106,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd } // the node has memory pressure, admit if not best-effort - if hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) { + if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) { notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod) if notBestEffort { return lifecycle.PodAdmitResult{Admit: true} @@ -133,14 +133,14 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod func (m *managerImpl) IsUnderMemoryPressure() bool { m.RLock() defer m.RUnlock() - return hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) + return hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) } // IsUnderDiskPressure returns true if the node is under disk pressure. func (m *managerImpl) IsUnderDiskPressure() bool { m.RLock() defer m.RUnlock() - return hasNodeCondition(m.nodeConditions, api.NodeDiskPressure) + return hasNodeCondition(m.nodeConditions, v1.NodeDiskPressure) } func startMemoryThresholdNotifier(thresholds []Threshold, observations signalObservations, hard bool, handler thresholdNotifierHandlerFunc) error { @@ -278,7 +278,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act softEviction := isSoftEvictionThresholds(thresholds, resourceToReclaim) // record an event about the resources we are now attempting to reclaim via eviction - m.recorder.Eventf(m.nodeRef, api.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) + m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) // check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods. 
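The synchronize() flow around this point follows a fixed order: attempt node-level reclaim first, then rank the active pods and evict at most one per interval with a Failed pod status. A condensed sketch of that control flow, using simplified stand-in types and a hypothetical rankByUsage comparator in place of the real rankFunc machinery:

```go
package main

import (
	"fmt"
	"sort"
)

// Simplified stand-in for a pod and its observed usage of the starved resource.
type pod struct {
	name  string
	usage int
}

// rankByUsage is a hypothetical rank function: greediest consumer first.
func rankByUsage(pods []pod) {
	sort.Slice(pods, func(i, j int) bool { return pods[i].usage > pods[j].usage })
}

// reclaimNodeLevel stands in for the node-level reclaim functions (e.g. image GC);
// it reports how much of the pressured resource it managed to free.
func reclaimNodeLevel(needed int) (freed int) { return 100 }

func main() {
	pods := []pod{{"guaranteed", 100}, {"best-effort-high", 500}, {"burstable-low", 200}}
	needed := 400

	// 1. Try node-level reclaim before touching end-user pods.
	if freed := reclaimNodeLevel(needed); freed >= needed {
		fmt.Println("pressure relieved by node-level reclaim; no eviction")
		return
	}
	// 2. Rank candidates and evict at most one pod per eviction interval.
	rankByUsage(pods)
	victim := pods[0]
	fmt.Printf("evicting %s: setting pod status to Failed with an eviction reason\n", victim.name)
}
```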
if m.reclaimNodeLevelResources(resourceToReclaim, observations) { @@ -310,13 +310,13 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // we kill at most a single pod during each eviction interval for i := range activePods { pod := activePods[i] - status := api.PodStatus{ - Phase: api.PodFailed, + status := v1.PodStatus{ + Phase: v1.PodFailed, Message: fmt.Sprintf(message, resourceToReclaim), Reason: reason, } // record that we are evicting the pod - m.recorder.Eventf(pod, api.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim)) + m.recorder.Eventf(pod, v1.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim)) gracePeriodOverride := int64(0) if softEviction { gracePeriodOverride = m.config.MaxPodGracePeriodSeconds @@ -335,7 +335,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act } // reclaimNodeLevelResources attempts to reclaim node level resources. returns true if thresholds were satisfied and no pod eviction is required. -func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim api.ResourceName, observations signalObservations) bool { +func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceName, observations signalObservations) bool { nodeReclaimFuncs := m.resourceToNodeReclaimFuncs[resourceToReclaim] for _, nodeReclaimFunc := range nodeReclaimFuncs { // attempt to reclaim the pressured resource. diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index eb40c82c043..a54baaa318f 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -31,13 +31,13 @@ import ( // mockPodKiller is used to testing which pod is killed type mockPodKiller struct { - pod *api.Pod - status api.PodStatus + pod *v1.Pod + status v1.PodStatus gracePeriodOverride *int64 } // killPodNow records the pod that was killed -func (m *mockPodKiller) killPodNow(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error { +func (m *mockPodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error { m.pod = pod m.status = status m.gracePeriodOverride = gracePeriodOverride @@ -67,23 +67,23 @@ func (m *mockImageGC) DeleteUnusedImages() (int64, error) { return m.freed, m.err } -func makePodWithMemoryStats(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) { - pod := newPod(name, []api.Container{ +func makePodWithMemoryStats(name string, requests v1.ResourceList, limits v1.ResourceList, memoryWorkingSet string) (*v1.Pod, statsapi.PodStats) { + pod := newPod(name, []v1.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet)) return pod, podStats } -func makePodWithDiskStats(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) { - pod := newPod(name, []api.Container{ +func makePodWithDiskStats(name string, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*v1.Pod, statsapi.PodStats) { + pod := 
newPod(name, []v1.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed)) return pod, podStats } -func makeMemoryStats(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { +func makeMemoryStats(nodeAvailableBytes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary { val := resource.MustParse(nodeAvailableBytes) availableBytes := uint64(val.Value()) WorkingSetBytes := uint64(val.Value()) @@ -102,7 +102,7 @@ func makeMemoryStats(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.P return result } -func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { +func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary { rootFsVal := resource.MustParse(rootFsAvailableBytes) rootFsBytes := uint64(rootFsVal.Value()) rootFsCapacityBytes := uint64(rootFsVal.Value() * 2) @@ -132,8 +132,8 @@ func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats type podToMake struct { name string - requests api.ResourceList - limits api.ResourceList + requests v1.ResourceList + limits v1.ResourceList memoryWorkingSet string rootFsUsed string logsFsUsed string @@ -155,15 +155,15 @@ func TestMemoryPressure(t *testing.T) { {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"}, {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"}, } - pods := []*api.Pod{} - podStats := map[*api.Pod]statsapi.PodStats{} + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) pods = append(pods, pod) podStats[pod] = podStat } podToEvict := pods[5] - activePodsFunc := func() []*api.Pod { + activePodsFunc := func() []*v1.Pod { return pods } @@ -171,7 +171,7 @@ func TestMemoryPressure(t *testing.T) { podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} - nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, @@ -221,7 +221,7 @@ func TestMemoryPressure(t *testing.T) { // try to admit our pods (they should succeed) expected := []bool{true, true} - for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } @@ -298,7 +298,7 @@ func TestMemoryPressure(t *testing.T) { // the best-effort pod should not admit, burstable should expected = []bool{false, true} - for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, 
expected[i], result.Admit) } @@ -322,7 +322,7 @@ func TestMemoryPressure(t *testing.T) { // the best-effort pod should not admit, burstable should expected = []bool{false, true} - for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } @@ -346,7 +346,7 @@ func TestMemoryPressure(t *testing.T) { // all pods should admit now expected = []bool{true, true} - for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } @@ -372,15 +372,15 @@ func TestDiskPressureNodeFs(t *testing.T) { {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), perLocalVolumeUsed: "300Mi"}, {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"}, } - pods := []*api.Pod{} - podStats := map[*api.Pod]statsapi.PodStats{} + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed) pods = append(pods, pod) podStats[pod] = podStat } podToEvict := pods[5] - activePodsFunc := func() []*api.Pod { + activePodsFunc := func() []*v1.Pod { return pods } @@ -388,7 +388,7 @@ func TestDiskPressureNodeFs(t *testing.T) { podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} - nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, @@ -569,15 +569,15 @@ func TestMinReclaim(t *testing.T) { {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"}, {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"}, } - pods := []*api.Pod{} - podStats := map[*api.Pod]statsapi.PodStats{} + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) pods = append(pods, pod) podStats[pod] = podStat } podToEvict := pods[5] - activePodsFunc := func() []*api.Pod { + activePodsFunc := func() []*v1.Pod { return pods } @@ -585,7 +585,7 @@ func TestMinReclaim(t *testing.T) { podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} - nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, @@ -707,15 +707,15 @@ func TestNodeReclaimFuncs(t *testing.T) { {name: "best-effort-low", requests: 
newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "300Mi"}, {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"}, } - pods := []*api.Pod{} - podStats := map[*api.Pod]statsapi.PodStats{} + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed) pods = append(pods, pod) podStats[pod] = podStat } podToEvict := pods[5] - activePodsFunc := func() []*api.Pod { + activePodsFunc := func() []*v1.Pod { return pods } @@ -724,7 +724,7 @@ func TestNodeReclaimFuncs(t *testing.T) { diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGcFree := resource.MustParse("700Mi") imageGC := &mockImageGC{freed: imageGcFree.Value(), err: nil} - nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, @@ -866,14 +866,14 @@ func TestNodeReclaimFuncs(t *testing.T) { } func TestInodePressureNodeFsInodes(t *testing.T) { - podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootInodes, logInodes, volumeInodes string) (*api.Pod, statsapi.PodStats) { - pod := newPod(name, []api.Container{ + podMaker := func(name string, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) { + pod := newPod(name, []v1.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodInodeStats(pod, parseQuantity(rootInodes), parseQuantity(logInodes), parseQuantity(volumeInodes)) return pod, podStats } - summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { + summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary { rootFsInodesFreeVal := resource.MustParse(rootFsInodesFree) internalRootFsInodesFree := uint64(rootFsInodesFreeVal.Value()) rootFsInodesVal := resource.MustParse(rootFsInodes) @@ -900,15 +900,15 @@ func TestInodePressureNodeFsInodes(t *testing.T) { {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsInodesUsed: "300Mi"}, {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsInodesUsed: "800Mi"}, } - pods := []*api.Pod{} - podStats := map[*api.Pod]statsapi.PodStats{} + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsInodesUsed, podToMake.logsFsInodesUsed, podToMake.perLocalVolumeInodesUsed) pods = append(pods, pod) podStats[pod] = podStat } podToEvict := pods[5] - activePodsFunc := func() []*api.Pod { + activePodsFunc := func() []*v1.Pod { return pods } @@ -916,7 +916,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} - nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: 
""} config := Config{ MaxPodGracePeriodSeconds: 5, diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index b6bb414241a..b98a3ef29c4 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -40,45 +41,45 @@ const ( // the message associated with the reason. message = "The node was low on resource: %v." // disk, in bytes. internal to this module, used to account for local disk usage. - resourceDisk api.ResourceName = "disk" + resourceDisk v1.ResourceName = "disk" // inodes, number. internal to this module, used to account for local disk inode consumption. - resourceInodes api.ResourceName = "inodes" + resourceInodes v1.ResourceName = "inodes" // imagefs, in bytes. internal to this module, used to account for local image filesystem usage. - resourceImageFs api.ResourceName = "imagefs" + resourceImageFs v1.ResourceName = "imagefs" // imagefs inodes, number. internal to this module, used to account for local image filesystem inodes. - resourceImageFsInodes api.ResourceName = "imagefsInodes" + resourceImageFsInodes v1.ResourceName = "imagefsInodes" // nodefs, in bytes. internal to this module, used to account for local node root filesystem usage. - resourceNodeFs api.ResourceName = "nodefs" + resourceNodeFs v1.ResourceName = "nodefs" // nodefs inodes, number. internal to this module, used to account for local node root filesystem inodes. - resourceNodeFsInodes api.ResourceName = "nodefsInodes" + resourceNodeFsInodes v1.ResourceName = "nodefsInodes" ) var ( // signalToNodeCondition maps a signal to the node condition to report if threshold is met. - signalToNodeCondition map[Signal]api.NodeConditionType + signalToNodeCondition map[Signal]v1.NodeConditionType // signalToResource maps a Signal to its associated Resource. 
- signalToResource map[Signal]api.ResourceName + signalToResource map[Signal]v1.ResourceName // resourceToSignal maps a Resource to its associated Signal - resourceToSignal map[api.ResourceName]Signal + resourceToSignal map[v1.ResourceName]Signal ) func init() { // map eviction signals to node conditions - signalToNodeCondition = map[Signal]api.NodeConditionType{} - signalToNodeCondition[SignalMemoryAvailable] = api.NodeMemoryPressure - signalToNodeCondition[SignalImageFsAvailable] = api.NodeDiskPressure - signalToNodeCondition[SignalNodeFsAvailable] = api.NodeDiskPressure - signalToNodeCondition[SignalImageFsInodesFree] = api.NodeDiskPressure - signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeDiskPressure + signalToNodeCondition = map[Signal]v1.NodeConditionType{} + signalToNodeCondition[SignalMemoryAvailable] = v1.NodeMemoryPressure + signalToNodeCondition[SignalImageFsAvailable] = v1.NodeDiskPressure + signalToNodeCondition[SignalNodeFsAvailable] = v1.NodeDiskPressure + signalToNodeCondition[SignalImageFsInodesFree] = v1.NodeDiskPressure + signalToNodeCondition[SignalNodeFsInodesFree] = v1.NodeDiskPressure // map signals to resources (and vice-versa) - signalToResource = map[Signal]api.ResourceName{} - signalToResource[SignalMemoryAvailable] = api.ResourceMemory + signalToResource = map[Signal]v1.ResourceName{} + signalToResource[SignalMemoryAvailable] = v1.ResourceMemory signalToResource[SignalImageFsAvailable] = resourceImageFs signalToResource[SignalImageFsInodesFree] = resourceImageFsInodes signalToResource[SignalNodeFsAvailable] = resourceNodeFs signalToResource[SignalNodeFsInodesFree] = resourceNodeFsInodes - resourceToSignal = map[api.ResourceName]Signal{} + resourceToSignal = map[v1.ResourceName]Signal{} for key, value := range signalToResource { resourceToSignal[value] = key } @@ -337,11 +338,11 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity { // localVolumeNames returns the set of volumes for the pod that are local // TODO: sumamry API should report what volumes consume local storage rather than hard-code here. -func localVolumeNames(pod *api.Pod) []string { +func localVolumeNames(pod *v1.Pod) []string { result := []string{} for _, volume := range pod.Spec.Volumes { if volume.HostPath != nil || - (volume.EmptyDir != nil && volume.EmptyDir.Medium != api.StorageMediumMemory) || + (volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) || volume.ConfigMap != nil || volume.GitRepo != nil { result = append(result, volume.Name) @@ -351,7 +352,7 @@ func localVolumeNames(pod *api.Pod) []string { } // podDiskUsage aggregates pod disk usage and inode consumption for the specified stats to measure. -func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsStatsType) (api.ResourceList, error) { +func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) { disk := resource.Quantity{Format: resource.BinarySI} inodes := resource.Quantity{Format: resource.BinarySI} for _, container := range podStats.Containers { @@ -376,14 +377,14 @@ func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsS } } } - return api.ResourceList{ + return v1.ResourceList{ resourceDisk: disk, resourceInodes: inodes, }, nil } // podMemoryUsage aggregates pod memory usage. 
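The aggregation podDiskUsage performs can be sketched with plain integers in place of resource.Quantity; the fsStats, containerStats, and volumeStats types below are simplified stand-ins rather than the kubelet stats API, and only rootfs, logs, and local-volume usage are counted, as in the function above:

```go
package main

import "fmt"

// fsStats is a stand-in for per-filesystem usage reported by the stats API.
type fsStats struct{ usedBytes, inodesUsed uint64 }

type containerStats struct{ rootfs, logs fsStats }

type volumeStats struct {
	name  string
	local bool // e.g. HostPath or a non-memory-backed EmptyDir
	fs    fsStats
}

// podDiskUsage sums container rootfs and logs usage plus local volume usage
// into total disk bytes and inode counts, mirroring the aggregation above.
func podDiskUsage(containers []containerStats, volumes []volumeStats) (disk, inodes uint64) {
	for _, c := range containers {
		disk += c.rootfs.usedBytes + c.logs.usedBytes
		inodes += c.rootfs.inodesUsed + c.logs.inodesUsed
	}
	for _, v := range volumes {
		if v.local {
			disk += v.fs.usedBytes
			inodes += v.fs.inodesUsed
		}
	}
	return disk, inodes
}

func main() {
	disk, inodes := podDiskUsage(
		[]containerStats{{rootfs: fsStats{50 << 20, 100}, logs: fsStats{10 << 20, 5}}},
		[]volumeStats{{name: "local-volume", local: true, fs: fsStats{20 << 20, 7}}},
	)
	fmt.Printf("disk=%dMi inodes=%d\n", disk>>20, inodes) // disk=80Mi inodes=112
}
```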
-func podMemoryUsage(podStats statsapi.PodStats) (api.ResourceList, error) { +func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) { disk := resource.Quantity{Format: resource.BinarySI} memory := resource.Quantity{Format: resource.BinarySI} for _, container := range podStats.Containers { @@ -394,9 +395,9 @@ func podMemoryUsage(podStats statsapi.PodStats) (api.ResourceList, error) { // memory usage (if known) memory.Add(*memoryUsage(container.Memory)) } - return api.ResourceList{ - api.ResourceMemory: memory, - resourceDisk: disk, + return v1.ResourceList{ + v1.ResourceMemory: memory, + resourceDisk: disk, }, nil } @@ -419,7 +420,7 @@ func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc { for i := range podStats { uid2PodStats[podStats[i].PodRef.UID] = podStats[i] } - return func(pod *api.Pod) (statsapi.PodStats, bool) { + return func(pod *v1.Pod) (statsapi.PodStats, bool) { stats, found := uid2PodStats[string(pod.UID)] return stats, found } @@ -431,16 +432,16 @@ func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc { // 0 if p1 == p2 // +1 if p1 > p2 // -type cmpFunc func(p1, p2 *api.Pod) int +type cmpFunc func(p1, p2 *v1.Pod) int // multiSorter implements the Sort interface, sorting changes within. type multiSorter struct { - pods []*api.Pod + pods []*v1.Pod cmp []cmpFunc } // Sort sorts the argument slice according to the less functions passed to OrderedBy. -func (ms *multiSorter) Sort(pods []*api.Pod) { +func (ms *multiSorter) Sort(pods []*v1.Pod) { ms.pods = pods sort.Sort(ms) } @@ -484,7 +485,7 @@ func (ms *multiSorter) Less(i, j int) bool { } // qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed) -func qosComparator(p1, p2 *api.Pod) int { +func qosComparator(p1, p2 *v1.Pod) int { qosP1 := qos.GetPodQOS(p1) qosP2 := qos.GetPodQOS(p2) // its a tie @@ -508,7 +509,7 @@ func qosComparator(p1, p2 *api.Pod) int { // memory compares pods by largest consumer of memory relative to request. func memory(stats statsFunc) cmpFunc { - return func(p1, p2 *api.Pod) int { + return func(p1, p2 *v1.Pod) int { p1Stats, found := stats(p1) // if we have no usage stats for p1, we want p2 first if !found { @@ -531,12 +532,12 @@ func memory(stats statsFunc) cmpFunc { } // adjust p1, p2 usage relative to the request (if any) - p1Memory := p1Usage[api.ResourceMemory] + p1Memory := p1Usage[v1.ResourceMemory] p1Spec := core.PodUsageFunc(p1) p1Request := p1Spec[api.ResourceRequestsMemory] p1Memory.Sub(p1Request) - p2Memory := p2Usage[api.ResourceMemory] + p2Memory := p2Usage[v1.ResourceMemory] p2Spec := core.PodUsageFunc(p2) p2Request := p2Spec[api.ResourceRequestsMemory] p2Memory.Sub(p2Request) @@ -547,8 +548,8 @@ func memory(stats statsFunc) cmpFunc { } // disk compares pods by largest consumer of disk relative to request for the specified disk resource. -func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource api.ResourceName) cmpFunc { - return func(p1, p2 *api.Pod) int { +func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc { + return func(p1, p2 *v1.Pod) int { p1Stats, found := stats(p1) // if we have no usage stats for p1, we want p2 first if !found { @@ -580,26 +581,26 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource api.Reso } // rankMemoryPressure orders the input pods for eviction in response to memory pressure. 
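The orderedBy/multiSorter machinery these rank functions rely on is a small comparator-chaining pattern: each cmpFunc is tried in turn and the first non-zero result decides the order. A self-contained sketch with a simplified pod type and hypothetical byQoS/byUsage comparators:

```go
package main

import (
	"fmt"
	"sort"
)

// Simplified pod: qos 0=BestEffort, 1=Burstable, 2=Guaranteed.
type pod struct {
	name  string
	qos   int
	usage int
}

// cmpFunc reports -1/0/+1-style ordering between two pods.
type cmpFunc func(p1, p2 *pod) int

type multiSorter struct {
	pods []*pod
	cmp  []cmpFunc
}

func orderedBy(cmp ...cmpFunc) *multiSorter { return &multiSorter{cmp: cmp} }

func (ms *multiSorter) Sort(pods []*pod) { ms.pods = pods; sort.Sort(ms) }
func (ms *multiSorter) Len() int         { return len(ms.pods) }
func (ms *multiSorter) Swap(i, j int)    { ms.pods[i], ms.pods[j] = ms.pods[j], ms.pods[i] }

// Less walks the comparators in order; the first that distinguishes the two
// pods wins, and the final comparator breaks any remaining tie.
func (ms *multiSorter) Less(i, j int) bool {
	p1, p2 := ms.pods[i], ms.pods[j]
	for _, c := range ms.cmp[:len(ms.cmp)-1] {
		if r := c(p1, p2); r != 0 {
			return r < 0
		}
	}
	return ms.cmp[len(ms.cmp)-1](p1, p2) < 0
}

func byQoS(p1, p2 *pod) int   { return p1.qos - p2.qos }   // BestEffort first
func byUsage(p1, p2 *pod) int { return p2.usage - p1.usage } // greedier first

func main() {
	pods := []*pod{{"guaranteed", 2, 900}, {"best-effort-low", 0, 100}, {"best-effort-high", 0, 500}}
	orderedBy(byQoS, byUsage).Sort(pods)
	for _, p := range pods {
		fmt.Println(p.name) // best-effort-high, best-effort-low, guaranteed
	}
}
```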
-func rankMemoryPressure(pods []*api.Pod, stats statsFunc) { +func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) { orderedBy(qosComparator, memory(stats)).Sort(pods) } // rankDiskPressureFunc returns a rankFunc that measures the specified fs stats. -func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource api.ResourceName) rankFunc { - return func(pods []*api.Pod, stats statsFunc) { +func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) rankFunc { + return func(pods []*v1.Pod, stats statsFunc) { orderedBy(qosComparator, disk(stats, fsStatsToMeasure, diskResource)).Sort(pods) } } -// byEvictionPriority implements sort.Interface for []api.ResourceName. -type byEvictionPriority []api.ResourceName +// byEvictionPriority implements sort.Interface for []v1.ResourceName. +type byEvictionPriority []v1.ResourceName func (a byEvictionPriority) Len() int { return len(a) } func (a byEvictionPriority) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Less ranks memory before all other resources. func (a byEvictionPriority) Less(i, j int) bool { - return a[i] == api.ResourceMemory + return a[i] == v1.ResourceMemory } // makeSignalObservations derives observations using the specified summary provider. @@ -740,8 +741,8 @@ func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) [] } // nodeConditions returns the set of node conditions associated with a threshold -func nodeConditions(thresholds []Threshold) []api.NodeConditionType { - results := []api.NodeConditionType{} +func nodeConditions(thresholds []Threshold) []v1.NodeConditionType { + results := []v1.NodeConditionType{} for _, threshold := range thresholds { if nodeCondition, found := signalToNodeCondition[threshold.Signal]; found { if !hasNodeCondition(results, nodeCondition) { @@ -753,7 +754,7 @@ func nodeConditions(thresholds []Threshold) []api.NodeConditionType { } // nodeConditionsLastObservedAt merges the input with the previous observation to determine when a condition was most recently met. 
-func nodeConditionsLastObservedAt(nodeConditions []api.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt { +func nodeConditionsLastObservedAt(nodeConditions []v1.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt { results := nodeConditionsObservedAt{} // the input conditions were observed "now" for i := range nodeConditions { @@ -770,8 +771,8 @@ func nodeConditionsLastObservedAt(nodeConditions []api.NodeConditionType, lastOb } // nodeConditionsObservedSince returns the set of conditions that have been observed within the specified period -func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []api.NodeConditionType { - results := []api.NodeConditionType{} +func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []v1.NodeConditionType { + results := []v1.NodeConditionType{} for nodeCondition, at := range observedAt { duration := now.Sub(at) if duration < period { @@ -792,7 +793,7 @@ func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool { } // hasNodeCondition returns true if the node condition is in the input list -func hasNodeCondition(inputs []api.NodeConditionType, item api.NodeConditionType) bool { +func hasNodeCondition(inputs []v1.NodeConditionType, item v1.NodeConditionType) bool { for _, input := range inputs { if input == item { return true @@ -837,8 +838,8 @@ func compareThresholdValue(a ThresholdValue, b ThresholdValue) bool { } // getStarvedResources returns the set of resources that are starved based on thresholds met. -func getStarvedResources(thresholds []Threshold) []api.ResourceName { - results := []api.ResourceName{} +func getStarvedResources(thresholds []Threshold) []v1.ResourceName { + results := []v1.ResourceName{} for _, threshold := range thresholds { if starvedResource, found := signalToResource[threshold.Signal]; found { results = append(results, starvedResource) @@ -848,7 +849,7 @@ func getStarvedResources(thresholds []Threshold) []api.ResourceName { } // isSoftEviction returns true if the thresholds met for the starved resource are only soft thresholds -func isSoftEvictionThresholds(thresholds []Threshold, starvedResource api.ResourceName) bool { +func isSoftEvictionThresholds(thresholds []Threshold, starvedResource v1.ResourceName) bool { for _, threshold := range thresholds { if resourceToCheck := signalToResource[threshold.Signal]; resourceToCheck != starvedResource { continue @@ -866,9 +867,9 @@ func isHardEvictionThreshold(threshold Threshold) bool { } // buildResourceToRankFunc returns ranking functions associated with resources -func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc { - resourceToRankFunc := map[api.ResourceName]rankFunc{ - api.ResourceMemory: rankMemoryPressure, +func buildResourceToRankFunc(withImageFs bool) map[v1.ResourceName]rankFunc { + resourceToRankFunc := map[v1.ResourceName]rankFunc{ + v1.ResourceMemory: rankMemoryPressure, } // usage of an imagefs is optional if withImageFs { @@ -890,13 +891,13 @@ func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc { } // PodIsEvicted returns true if the reported pod status is due to an eviction. 
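When several resources are starved at once, byEvictionPriority moves memory to the front of the reclaim order. A minimal sketch using plain strings in place of v1.ResourceName:

```go
package main

import (
	"fmt"
	"sort"
)

// byEvictionPriority sorts memory ahead of every other starved resource,
// mirroring the Less implementation above.
type byEvictionPriority []string

func (a byEvictionPriority) Len() int           { return len(a) }
func (a byEvictionPriority) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byEvictionPriority) Less(i, j int) bool { return a[i] == "memory" }

func main() {
	starved := []string{"nodefs", "imagefs", "memory"}
	sort.Sort(byEvictionPriority(starved))
	fmt.Println(starved) // memory comes first, e.g. [memory nodefs imagefs]
}
```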
-func PodIsEvicted(podStatus api.PodStatus) bool { - return podStatus.Phase == api.PodFailed && podStatus.Reason == reason +func PodIsEvicted(podStatus v1.PodStatus) bool { + return podStatus.Phase == v1.PodFailed && podStatus.Reason == reason } // buildResourceToNodeReclaimFuncs returns reclaim functions associated with resources. -func buildResourceToNodeReclaimFuncs(imageGC ImageGC, withImageFs bool) map[api.ResourceName]nodeReclaimFuncs { - resourceToReclaimFunc := map[api.ResourceName]nodeReclaimFuncs{} +func buildResourceToNodeReclaimFuncs(imageGC ImageGC, withImageFs bool) map[v1.ResourceName]nodeReclaimFuncs { + resourceToReclaimFunc := map[v1.ResourceName]nodeReclaimFuncs{} // usage of an imagefs is optional if withImageFs { // with an imagefs, nodefs pressure should just delete logs diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index b53a5008baa..bff42bcc903 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/types" @@ -397,20 +398,20 @@ func thresholdEqual(a Threshold, b Threshold) bool { // TestOrderedByQoS ensures we order BestEffort < Burstable < Guaranteed func TestOrderedByQoS(t *testing.T) { - bestEffort := newPod("best-effort", []api.Container{ + bestEffort := newPod("best-effort", []v1.Container{ newContainer("best-effort", newResourceList("", ""), newResourceList("", "")), }, nil) - burstable := newPod("burstable", []api.Container{ + burstable := newPod("burstable", []v1.Container{ newContainer("burstable", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi")), }, nil) - guaranteed := newPod("guaranteed", []api.Container{ + guaranteed := newPod("guaranteed", []v1.Container{ newContainer("guaranteed", newResourceList("200m", "200Mi"), newResourceList("200m", "200Mi")), }, nil) - pods := []*api.Pod{guaranteed, burstable, bestEffort} + pods := []*v1.Pod{guaranteed, burstable, bestEffort} orderedBy(qosComparator).Sort(pods) - expected := []*api.Pod{bestEffort, burstable, guaranteed} + expected := []*v1.Pod{bestEffort, burstable, guaranteed} for i := range expected { if pods[i] != expected[i] { t.Errorf("Expected pod: %s, but got: %s", expected[i].Name, pods[i].Name) @@ -427,51 +428,51 @@ func TestOrderedbyInodes(t *testing.T) { } // testOrderedByDisk ensures we order pods by greediest resource consumer -func testOrderedByResource(t *testing.T, orderedByResource api.ResourceName, - newPodStatsFunc func(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) { - pod1 := newPod("best-effort-high", []api.Container{ +func testOrderedByResource(t *testing.T, orderedByResource v1.ResourceName, + newPodStatsFunc func(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) { + pod1 := newPod("best-effort-high", []v1.Container{ newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod2 := newPod("best-effort-low", []api.Container{ + pod2 := newPod("best-effort-low", []v1.Container{ 
newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod3 := newPod("burstable-high", []api.Container{ + pod3 := newPod("burstable-high", []v1.Container{ newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod4 := newPod("burstable-low", []api.Container{ + pod4 := newPod("burstable-low", []v1.Container{ newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod5 := newPod("guaranteed-high", []api.Container{ + pod5 := newPod("guaranteed-high", []v1.Container{ newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod6 := newPod("guaranteed-low", []api.Container{ + pod6 := newPod("guaranteed-low", []v1.Container{ newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - stats := map[*api.Pod]statsapi.PodStats{ + stats := map[*v1.Pod]statsapi.PodStats{ pod1: newPodStatsFunc(pod1, resource.MustParse("50Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 200Mi pod2: newPodStatsFunc(pod2, resource.MustParse("100Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 300Mi pod3: newPodStatsFunc(pod3, resource.MustParse("200Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 400Mi @@ -479,13 +480,13 @@ func testOrderedByResource(t *testing.T, orderedByResource api.ResourceName, pod5: newPodStatsFunc(pod5, resource.MustParse("400Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 550Mi pod6: newPodStatsFunc(pod6, resource.MustParse("500Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 650Mi } - statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) { result, found := stats[pod] return result, found } - pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6} orderedBy(disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, orderedByResource)).Sort(pods) - expected := []*api.Pod{pod6, pod5, pod4, pod3, pod2, pod1} + expected := []*v1.Pod{pod6, pod5, pod4, pod3, pod2, pod1} for i := range expected { if pods[i] != expected[i] { t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name) @@ -502,51 +503,51 @@ func TestOrderedbyQoSInodes(t *testing.T) { } // testOrderedByQoSDisk ensures we order pods by qos and 
then greediest resource consumer -func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName, - newPodStatsFunc func(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) { - pod1 := newPod("best-effort-high", []api.Container{ +func testOrderedByQoSResource(t *testing.T, orderedByResource v1.ResourceName, + newPodStatsFunc func(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) { + pod1 := newPod("best-effort-high", []v1.Container{ newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod2 := newPod("best-effort-low", []api.Container{ + pod2 := newPod("best-effort-low", []v1.Container{ newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod3 := newPod("burstable-high", []api.Container{ + pod3 := newPod("burstable-high", []v1.Container{ newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod4 := newPod("burstable-low", []api.Container{ + pod4 := newPod("burstable-low", []v1.Container{ newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod5 := newPod("guaranteed-high", []api.Container{ + pod5 := newPod("guaranteed-high", []v1.Container{ newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - pod6 := newPod("guaranteed-low", []api.Container{ + pod6 := newPod("guaranteed-low", []v1.Container{ newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), - }, []api.Volume{ - newVolume("local-volume", api.VolumeSource{ - EmptyDir: &api.EmptyDirVolumeSource{}, + }, []v1.Volume{ + newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, }), }) - stats := map[*api.Pod]statsapi.PodStats{ + stats := map[*v1.Pod]statsapi.PodStats{ pod1: newPodStatsFunc(pod1, resource.MustParse("50Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 200Mi pod2: newPodStatsFunc(pod2, resource.MustParse("100Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 300Mi pod3: newPodStatsFunc(pod3, resource.MustParse("200Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 400Mi @@ -554,13 +555,13 @@ func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName, pod5: newPodStatsFunc(pod5, resource.MustParse("400Mi"), resource.MustParse("100Mi"), 
resource.MustParse("50Mi")), // 550Mi pod6: newPodStatsFunc(pod6, resource.MustParse("500Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 650Mi } - statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) { result, found := stats[pod] return result, found } - pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6} orderedBy(qosComparator, disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, orderedByResource)).Sort(pods) - expected := []*api.Pod{pod2, pod1, pod4, pod3, pod6, pod5} + expected := []*v1.Pod{pod2, pod1, pod4, pod3, pod6, pod5} for i := range expected { if pods[i] != expected[i] { t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name) @@ -570,25 +571,25 @@ func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName, // TestOrderedByMemory ensures we order pods by greediest memory consumer relative to request. func TestOrderedByMemory(t *testing.T) { - pod1 := newPod("best-effort-high", []api.Container{ + pod1 := newPod("best-effort-high", []v1.Container{ newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), }, nil) - pod2 := newPod("best-effort-low", []api.Container{ + pod2 := newPod("best-effort-low", []v1.Container{ newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), }, nil) - pod3 := newPod("burstable-high", []api.Container{ + pod3 := newPod("burstable-high", []v1.Container{ newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), }, nil) - pod4 := newPod("burstable-low", []api.Container{ + pod4 := newPod("burstable-low", []v1.Container{ newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), }, nil) - pod5 := newPod("guaranteed-high", []api.Container{ + pod5 := newPod("guaranteed-high", []v1.Container{ newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), }, nil) - pod6 := newPod("guaranteed-low", []api.Container{ + pod6 := newPod("guaranteed-low", []v1.Container{ newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), }, nil) - stats := map[*api.Pod]statsapi.PodStats{ + stats := map[*v1.Pod]statsapi.PodStats{ pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request pod2: newPodMemoryStats(pod2, resource.MustParse("300Mi")), // 300 relative to request pod3: newPodMemoryStats(pod3, resource.MustParse("800Mi")), // 700 relative to request @@ -596,13 +597,13 @@ func TestOrderedByMemory(t *testing.T) { pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request } - statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) { result, found := stats[pod] return result, found } - pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6} orderedBy(memory(statsFn)).Sort(pods) - expected := []*api.Pod{pod3, pod1, pod2, pod4, pod5, pod6} + expected := []*v1.Pod{pod3, pod1, pod2, pod4, pod5, pod6} for i := range expected { if pods[i] != expected[i] { t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name) @@ -612,25 +613,25 @@ func TestOrderedByMemory(t *testing.T) { // 
TestOrderedByQoSMemory ensures we order by qosComparator and then memory consumption relative to request. func TestOrderedByQoSMemory(t *testing.T) { - pod1 := newPod("best-effort-high", []api.Container{ + pod1 := newPod("best-effort-high", []v1.Container{ newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), }, nil) - pod2 := newPod("best-effort-low", []api.Container{ + pod2 := newPod("best-effort-low", []v1.Container{ newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), }, nil) - pod3 := newPod("burstable-high", []api.Container{ + pod3 := newPod("burstable-high", []v1.Container{ newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), }, nil) - pod4 := newPod("burstable-low", []api.Container{ + pod4 := newPod("burstable-low", []v1.Container{ newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), }, nil) - pod5 := newPod("guaranteed-high", []api.Container{ + pod5 := newPod("guaranteed-high", []v1.Container{ newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), }, nil) - pod6 := newPod("guaranteed-low", []api.Container{ + pod6 := newPod("guaranteed-low", []v1.Container{ newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), }, nil) - stats := map[*api.Pod]statsapi.PodStats{ + stats := map[*v1.Pod]statsapi.PodStats{ pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request pod2: newPodMemoryStats(pod2, resource.MustParse("50Mi")), // 50 relative to request pod3: newPodMemoryStats(pod3, resource.MustParse("50Mi")), // -50 relative to request @@ -638,12 +639,12 @@ func TestOrderedByQoSMemory(t *testing.T) { pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request } - statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) { result, found := stats[pod] return result, found } - pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} - expected := []*api.Pod{pod1, pod2, pod4, pod3, pod5, pod6} + pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + expected := []*v1.Pod{pod1, pod2, pod4, pod3, pod5, pod6} orderedBy(qosComparator, memory(statsFn)).Sort(pods) for i := range expected { if pods[i] != expected[i] { @@ -662,7 +663,7 @@ func (f *fakeSummaryProvider) Get() (*statsapi.Summary, error) { // newPodStats returns a pod stat where each container is using the specified working set // each pod must have a Name, UID, Namespace -func newPodStats(pod *api.Pod, containerWorkingSetBytes int64) statsapi.PodStats { +func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats { result := statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, @@ -682,14 +683,14 @@ func newPodStats(pod *api.Pod, containerWorkingSetBytes int64) statsapi.PodStats } func TestMakeSignalObservations(t *testing.T) { - podMaker := func(name, namespace, uid string, numContainers int) *api.Pod { - pod := &api.Pod{} + podMaker := func(name, namespace, uid string, numContainers int) *v1.Pod { + pod := &v1.Pod{} pod.Name = name pod.Namespace = namespace pod.UID = types.UID(uid) - pod.Spec = api.PodSpec{} + pod.Spec = v1.PodSpec{} for i := 0; i < numContainers; i++ { - pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ + pod.Spec.Containers = 
append(pod.Spec.Containers, v1.Container{ Name: fmt.Sprintf("ctr%v", i), }) } @@ -731,7 +732,7 @@ func TestMakeSignalObservations(t *testing.T) { provider := &fakeSummaryProvider{ result: fakeStats, } - pods := []*api.Pod{ + pods := []*v1.Pod{ podMaker("pod1", "ns1", "uuid1", 1), podMaker("pod1", "ns2", "uuid2", 1), podMaker("pod3", "ns3", "uuid3", 1), @@ -1199,17 +1200,17 @@ func TestThresholdsMetGracePeriod(t *testing.T) { func TestNodeConditions(t *testing.T) { testCases := map[string]struct { inputs []Threshold - result []api.NodeConditionType + result []v1.NodeConditionType }{ "empty-list": { inputs: []Threshold{}, - result: []api.NodeConditionType{}, + result: []v1.NodeConditionType{}, }, "memory.available": { inputs: []Threshold{ {Signal: SignalMemoryAvailable}, }, - result: []api.NodeConditionType{api.NodeMemoryPressure}, + result: []v1.NodeConditionType{v1.NodeMemoryPressure}, }, } for testName, testCase := range testCases { @@ -1224,37 +1225,37 @@ func TestNodeConditionsLastObservedAt(t *testing.T) { now := unversioned.Now() oldTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) testCases := map[string]struct { - nodeConditions []api.NodeConditionType + nodeConditions []v1.NodeConditionType lastObservedAt nodeConditionsObservedAt now time.Time result nodeConditionsObservedAt }{ "no-previous-observation": { - nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure}, + nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure}, lastObservedAt: nodeConditionsObservedAt{}, now: now.Time, result: nodeConditionsObservedAt{ - api.NodeMemoryPressure: now.Time, + v1.NodeMemoryPressure: now.Time, }, }, "previous-observation": { - nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure}, + nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure}, lastObservedAt: nodeConditionsObservedAt{ - api.NodeMemoryPressure: oldTime.Time, + v1.NodeMemoryPressure: oldTime.Time, }, now: now.Time, result: nodeConditionsObservedAt{ - api.NodeMemoryPressure: now.Time, + v1.NodeMemoryPressure: now.Time, }, }, "old-observation": { - nodeConditions: []api.NodeConditionType{}, + nodeConditions: []v1.NodeConditionType{}, lastObservedAt: nodeConditionsObservedAt{ - api.NodeMemoryPressure: oldTime.Time, + v1.NodeMemoryPressure: oldTime.Time, }, now: now.Time, result: nodeConditionsObservedAt{ - api.NodeMemoryPressure: oldTime.Time, + v1.NodeMemoryPressure: oldTime.Time, }, }, } @@ -1273,23 +1274,23 @@ func TestNodeConditionsObservedSince(t *testing.T) { observedAt nodeConditionsObservedAt period time.Duration now time.Time - result []api.NodeConditionType + result []v1.NodeConditionType }{ "in-period": { observedAt: nodeConditionsObservedAt{ - api.NodeMemoryPressure: observedTime.Time, + v1.NodeMemoryPressure: observedTime.Time, }, period: 2 * time.Minute, now: now.Time, - result: []api.NodeConditionType{api.NodeMemoryPressure}, + result: []v1.NodeConditionType{v1.NodeMemoryPressure}, }, "out-of-period": { observedAt: nodeConditionsObservedAt{ - api.NodeMemoryPressure: observedTime.Time, + v1.NodeMemoryPressure: observedTime.Time, }, period: 30 * time.Second, now: now.Time, - result: []api.NodeConditionType{}, + result: []v1.NodeConditionType{}, }, } for testName, testCase := range testCases { @@ -1302,18 +1303,18 @@ func TestNodeConditionsObservedSince(t *testing.T) { func TestHasNodeConditions(t *testing.T) { testCases := map[string]struct { - inputs []api.NodeConditionType - item api.NodeConditionType + inputs []v1.NodeConditionType + item v1.NodeConditionType result bool }{ 
"has-condition": { - inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk, api.NodeMemoryPressure}, - item: api.NodeMemoryPressure, + inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk, v1.NodeMemoryPressure}, + item: v1.NodeMemoryPressure, result: true, }, "does-not-have-condition": { - inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk}, - item: api.NodeMemoryPressure, + inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk}, + item: v1.NodeMemoryPressure, result: false, }, } @@ -1327,31 +1328,38 @@ func TestHasNodeConditions(t *testing.T) { func TestGetStarvedResources(t *testing.T) { testCases := map[string]struct { inputs []Threshold - result []api.ResourceName + result []v1.ResourceName }{ "memory.available": { inputs: []Threshold{ {Signal: SignalMemoryAvailable}, }, - result: []api.ResourceName{api.ResourceMemory}, + result: []v1.ResourceName{v1.ResourceMemory}, }, "imagefs.available": { inputs: []Threshold{ {Signal: SignalImageFsAvailable}, }, - result: []api.ResourceName{resourceImageFs}, + result: []v1.ResourceName{resourceImageFs}, }, "nodefs.available": { inputs: []Threshold{ {Signal: SignalNodeFsAvailable}, }, - result: []api.ResourceName{resourceNodeFs}, + result: []v1.ResourceName{resourceNodeFs}, }, } + var internalResourceNames = func(in []v1.ResourceName) []api.ResourceName { + var out []api.ResourceName + for _, name := range in { + out = append(out, api.ResourceName(name)) + } + return out + } for testName, testCase := range testCases { actual := getStarvedResources(testCase.inputs) - actualSet := quota.ToSet(actual) - expectedSet := quota.ToSet(testCase.result) + actualSet := quota.ToSet(internalResourceNames(actual)) + expectedSet := quota.ToSet(internalResourceNames(testCase.result)) if !actualSet.Equal(expectedSet) { t.Errorf("Test case: %s, expected: %v, actual: %v", testName, expectedSet, actualSet) } @@ -1448,7 +1456,7 @@ func testCompareThresholdValue(t *testing.T) { } // newPodInodeStats returns stats with specified usage amounts. -func newPodInodeStats(pod *api.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVolumeInodesUsed resource.Quantity) statsapi.PodStats { +func newPodInodeStats(pod *v1.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVolumeInodesUsed resource.Quantity) statsapi.PodStats { result := statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), @@ -1480,7 +1488,7 @@ func newPodInodeStats(pod *api.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVo } // newPodDiskStats returns stats with specified usage amounts. 
-func newPodDiskStats(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats { +func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats { result := statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), @@ -1513,7 +1521,7 @@ func newPodDiskStats(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed reso return result } -func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodStats { +func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodStats { result := statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), @@ -1530,46 +1538,46 @@ func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodS return result } -func newResourceList(cpu, memory string) api.ResourceList { - res := api.ResourceList{} +func newResourceList(cpu, memory string) v1.ResourceList { + res := v1.ResourceList{} if cpu != "" { - res[api.ResourceCPU] = resource.MustParse(cpu) + res[v1.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { - res[api.ResourceMemory] = resource.MustParse(memory) + res[v1.ResourceMemory] = resource.MustParse(memory) } return res } -func newResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { - res := api.ResourceRequirements{} +func newResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements { + res := v1.ResourceRequirements{} res.Requests = requests res.Limits = limits return res } -func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container { - return api.Container{ +func newContainer(name string, requests v1.ResourceList, limits v1.ResourceList) v1.Container { + return v1.Container{ Name: name, Resources: newResourceRequirements(requests, limits), } } -func newVolume(name string, volumeSource api.VolumeSource) api.Volume { - return api.Volume{ +func newVolume(name string, volumeSource v1.VolumeSource) v1.Volume { + return v1.Volume{ Name: name, VolumeSource: volumeSource, } } // newPod uses the name as the uid. Make names unique for testing. -func newPod(name string, containers []api.Container, volumes []api.Volume) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func newPod(name string, containers []v1.Container, volumes []v1.Volume) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: name, UID: types.UID(name), }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, Volumes: volumes, }, @@ -1577,7 +1585,7 @@ func newPod(name string, containers []api.Container, volumes []api.Volume) *api. } // nodeConditionList is a simple alias to support equality checking independent of order -type nodeConditionList []api.NodeConditionType +type nodeConditionList []v1.NodeConditionType // Equal adds the ability to check equality between two lists of node conditions. 
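// Illustrative sketch: the Equal helper, whose signature appears just below,
// compares two condition lists independent of order; its body is not part of
// this hunk, so the version here is an assumption about its shape, using a
// stand-in condition type and an occurrence count rather than the real v1
// package.

package main

import "fmt"

// NodeConditionType stands in for v1.NodeConditionType in this sketch.
type NodeConditionType string

type nodeConditionList []NodeConditionType

// Equal reports whether both lists contain the same conditions, ignoring order.
func (s1 nodeConditionList) Equal(s2 nodeConditionList) bool {
	if len(s1) != len(s2) {
		return false
	}
	counts := map[NodeConditionType]int{}
	for _, c := range s1 {
		counts[c]++
	}
	for _, c := range s2 {
		counts[c]--
		if counts[c] < 0 {
			return false
		}
	}
	return true
}

func main() {
	a := nodeConditionList{"MemoryPressure", "Ready"}
	b := nodeConditionList{"Ready", "MemoryPressure"}
	fmt.Println(a.Equal(b)) // true
}
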
func (s1 nodeConditionList) Equal(s2 nodeConditionList) bool { diff --git a/pkg/kubelet/eviction/types.go b/pkg/kubelet/eviction/types.go index 7cbfff7f9a3..e7e2a7d3b73 100644 --- a/pkg/kubelet/eviction/types.go +++ b/pkg/kubelet/eviction/types.go @@ -19,9 +19,9 @@ package eviction import ( "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" ) @@ -126,16 +126,16 @@ type ImageGC interface { // pod - the pod to kill // status - the desired status to associate with the pod (i.e. why its killed) // gracePeriodOverride - the grace period override to use instead of what is on the pod spec -type KillPodFunc func(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error +type KillPodFunc func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error // ActivePodsFunc returns pods bound to the kubelet that are active (i.e. non-terminal state) -type ActivePodsFunc func() []*api.Pod +type ActivePodsFunc func() []*v1.Pod // statsFunc returns the usage stats if known for an input pod. -type statsFunc func(pod *api.Pod) (statsapi.PodStats, bool) +type statsFunc func(pod *v1.Pod) (statsapi.PodStats, bool) // rankFunc sorts the pods in eviction order -type rankFunc func(pods []*api.Pod, stats statsFunc) +type rankFunc func(pods []*v1.Pod, stats statsFunc) // signalObservation is the observed resource usage type signalObservation struct { @@ -154,7 +154,7 @@ type signalObservations map[Signal]signalObservation type thresholdsObservedAt map[Threshold]time.Time // nodeConditionsObservedAt maps a node condition to a time that it was observed -type nodeConditionsObservedAt map[api.NodeConditionType]time.Time +type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time // nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods. type nodeReclaimFunc func() (*resource.Quantity, error) diff --git a/pkg/kubelet/images/helpers.go b/pkg/kubelet/images/helpers.go index f8fd5f1537e..67a8da94ff3 100644 --- a/pkg/kubelet/images/helpers.go +++ b/pkg/kubelet/images/helpers.go @@ -19,7 +19,7 @@ package images import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/flowcontrol" ) @@ -42,7 +42,7 @@ type throttledImageService struct { limiter flowcontrol.RateLimiter } -func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error { +func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) error { if ts.limiter.TryAccept() { return ts.ImageService.PullImage(image, secrets) } diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index c17c8741275..131b5685e87 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -24,7 +24,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/container" @@ -85,7 +85,7 @@ type realImageGCManager struct { recorder record.EventRecorder // Reference to this node. 
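// Illustrative sketch (stand-in interfaces, not the real kubecontainer or
// flowcontrol packages): throttledImageService above is a small decorator that
// embeds the wrapped image service and consults a rate limiter via TryAccept
// before delegating a pull. The same shape in a self-contained form:

package main

import "fmt"

// imageService stands in for the subset of kubecontainer.ImageService used here.
type imageService interface {
	PullImage(image string) error
}

// rateLimiter stands in for flowcontrol.RateLimiter; TryAccept reports whether
// a token is available without blocking.
type rateLimiter interface {
	TryAccept() bool
}

// throttledImageService rejects pulls when the limiter has no tokens left.
type throttledImageService struct {
	imageService
	limiter rateLimiter
}

func (ts throttledImageService) PullImage(image string) error {
	if ts.limiter.TryAccept() {
		return ts.imageService.PullImage(image)
	}
	return fmt.Errorf("pull QPS exceeded for image %q", image)
}

// budget is a fixed-size limiter and fakePuller a no-op puller for the example.
type budget struct{ tokens int }

func (b *budget) TryAccept() bool { b.tokens--; return b.tokens >= 0 }

type fakePuller struct{}

func (fakePuller) PullImage(image string) error { return nil }

func main() {
	ts := throttledImageService{imageService: fakePuller{}, limiter: &budget{tokens: 1}}
	fmt.Println(ts.PullImage("busybox")) // <nil>
	fmt.Println(ts.PullImage("busybox")) // pull QPS exceeded for image "busybox"
}
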
- nodeRef *api.ObjectReference + nodeRef *v1.ObjectReference // Track initialization initialized bool @@ -103,7 +103,7 @@ type imageRecord struct { size int64 } -func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *api.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) { +func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) { // Validate policy. if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 { return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent) @@ -227,7 +227,7 @@ func (im *realImageGCManager) GarbageCollect() error { // Check valid capacity. if capacity == 0 { err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint) - im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.InvalidDiskCapacity, err.Error()) + im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error()) return err } @@ -243,7 +243,7 @@ func (im *realImageGCManager) GarbageCollect() error { if freed < amountToFree { err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed) - im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error()) + im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error()) return err } } diff --git a/pkg/kubelet/images/image_manager.go b/pkg/kubelet/images/image_manager.go index 02ea5b4267a..b2809f1353a 100644 --- a/pkg/kubelet/images/image_manager.go +++ b/pkg/kubelet/images/image_manager.go @@ -21,7 +21,7 @@ import ( dockerref "github.com/docker/distribution/reference" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -59,13 +59,13 @@ func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.I // shouldPullImage returns whether we should pull an image according to // the presence and pull policy of the image. -func shouldPullImage(container *api.Container, imagePresent bool) bool { - if container.ImagePullPolicy == api.PullNever { +func shouldPullImage(container *v1.Container, imagePresent bool) bool { + if container.ImagePullPolicy == v1.PullNever { return false } - if container.ImagePullPolicy == api.PullAlways || - (container.ImagePullPolicy == api.PullIfNotPresent && (!imagePresent)) { + if container.ImagePullPolicy == v1.PullAlways || + (container.ImagePullPolicy == v1.PullIfNotPresent && (!imagePresent)) { return true } @@ -73,7 +73,7 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool { } // records an event using ref, event msg. 
log to glog using prefix, msg, logFn -func (m *imageManager) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) { +func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) { if ref != nil { m.recorder.Event(ref, eventtype, event, msg) } else { @@ -82,7 +82,7 @@ func (m *imageManager) logIt(ref *api.ObjectReference, eventtype, event, prefix, } // EnsureImageExists pulls the image for the specified pod and container. -func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) { +func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) { logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image) ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { @@ -93,7 +93,7 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, image, err := applyDefaultImageTag(container.Image) if err != nil { msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err) - m.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) return ErrInvalidImageName, msg } @@ -101,18 +101,18 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, present, err := m.imageService.IsImagePresent(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) - m.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) return ErrImageInspect, msg } if !shouldPullImage(container, present) { if present { msg := fmt.Sprintf("Container image %q already present on machine", container.Image) - m.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info) return nil, "" } else { msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - m.logIt(ref, api.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) return ErrImageNeverPull, msg } } @@ -120,14 +120,14 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image) if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) { msg := fmt.Sprintf("Back-off pulling image %q", container.Image) - m.logIt(ref, api.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info) return ErrImagePullBackOff, msg } - m.logIt(ref, api.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) errChan := make(chan error) m.puller.pullImage(spec, pullSecrets, errChan) if err := <-errChan; err != nil { - m.logIt(ref, api.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), 
glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) m.backOff.Next(backOffKey, m.backOff.Clock.Now()) if err == RegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) @@ -136,7 +136,7 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, return ErrImagePull, err.Error() } } - m.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) m.backOff.GC() return nil, "" } diff --git a/pkg/kubelet/images/image_manager_test.go b/pkg/kubelet/images/image_manager_test.go index 7ff7137a627..e8284932ed6 100644 --- a/pkg/kubelet/images/image_manager_test.go +++ b/pkg/kubelet/images/image_manager_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" . "k8s.io/kubernetes/pkg/kubelet/container" ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -31,8 +31,8 @@ import ( ) func TestParallelPuller(t *testing.T) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test-ns", UID: "bar", @@ -42,7 +42,7 @@ func TestParallelPuller(t *testing.T) { cases := []struct { containerImage string - policy api.PullPolicy + policy v1.PullPolicy calledFunctions []string inspectErr error pullerErr error @@ -50,7 +50,7 @@ func TestParallelPuller(t *testing.T) { }{ { // pull missing image containerImage: "missing_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, @@ -58,35 +58,35 @@ func TestParallelPuller(t *testing.T) { { // image present, don't pull containerImage: "present_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // image present, pull it {containerImage: "present_image", - policy: api.PullAlways, + policy: v1.PullAlways, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // missing image, error PullNever {containerImage: "missing_image", - policy: api.PullNever, + policy: v1.PullNever, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}}, // missing image, unable to inspect {containerImage: "missing_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: errors.New("unknown inspectError"), pullerErr: nil, expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}}, // missing image, unable to fetch {containerImage: "typo_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: errors.New("404"), @@ -94,7 +94,7 @@ func TestParallelPuller(t *testing.T) { } for i, c := range cases { - container := &api.Container{ + container := &v1.Container{ Name: "container_name", Image: 
c.containerImage, ImagePullPolicy: c.policy, @@ -122,8 +122,8 @@ func TestParallelPuller(t *testing.T) { } func TestSerializedPuller(t *testing.T) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test-ns", UID: "bar", @@ -133,7 +133,7 @@ func TestSerializedPuller(t *testing.T) { cases := []struct { containerImage string - policy api.PullPolicy + policy v1.PullPolicy calledFunctions []string inspectErr error pullerErr error @@ -141,7 +141,7 @@ func TestSerializedPuller(t *testing.T) { }{ { // pull missing image containerImage: "missing_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, @@ -149,35 +149,35 @@ func TestSerializedPuller(t *testing.T) { { // image present, don't pull containerImage: "present_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // image present, pull it {containerImage: "present_image", - policy: api.PullAlways, + policy: v1.PullAlways, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // missing image, error PullNever {containerImage: "missing_image", - policy: api.PullNever, + policy: v1.PullNever, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}}, // missing image, unable to inspect {containerImage: "missing_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: errors.New("unknown inspectError"), pullerErr: nil, expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}}, // missing image, unable to fetch {containerImage: "typo_image", - policy: api.PullIfNotPresent, + policy: v1.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: errors.New("404"), @@ -185,7 +185,7 @@ func TestSerializedPuller(t *testing.T) { } for i, c := range cases { - container := &api.Container{ + container := &v1.Container{ Name: "container_name", Image: c.containerImage, ImagePullPolicy: c.policy, diff --git a/pkg/kubelet/images/puller.go b/pkg/kubelet/images/puller.go index 7bcbd0bf9f5..5698e4c2664 100644 --- a/pkg/kubelet/images/puller.go +++ b/pkg/kubelet/images/puller.go @@ -19,13 +19,13 @@ package images import ( "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/wait" ) type imagePuller interface { - pullImage(kubecontainer.ImageSpec, []api.Secret, chan<- error) + pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- error) } var _, _ imagePuller = ¶llelImagePuller{}, &serialImagePuller{} @@ -38,7 +38,7 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller return ¶llelImagePuller{imageService} } -func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []api.Secret, errChan chan<- error) { +func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) { go func() { errChan <- pip.imageService.PullImage(spec, pullSecrets) }() @@ -60,11 +60,11 @@ func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller { type 
imagePullRequest struct { spec kubecontainer.ImageSpec - pullSecrets []api.Secret + pullSecrets []v1.Secret errChan chan<- error } -func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []api.Secret, errChan chan<- error) { +func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) { sip.pullRequests <- &imagePullRequest{ spec: spec, pullSecrets: pullSecrets, diff --git a/pkg/kubelet/images/types.go b/pkg/kubelet/images/types.go index 1f81d947e17..9870088f948 100644 --- a/pkg/kubelet/images/types.go +++ b/pkg/kubelet/images/types.go @@ -19,7 +19,7 @@ package images import ( "errors" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) var ( @@ -49,7 +49,7 @@ var ( // Implementations are expected to be thread safe. type ImageManager interface { // EnsureImageExists ensures that image specified in `container` exists. - EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) + EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) // TODO(ronl): consolidating image managing and deleting operation in this interface } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index a563f1dd572..67cb8f9c0ff 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -33,10 +33,11 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fields" @@ -152,11 +153,11 @@ const ( // SyncHandler is an interface implemented by Kubelet, for testability type SyncHandler interface { - HandlePodAdditions(pods []*api.Pod) - HandlePodUpdates(pods []*api.Pod) - HandlePodRemoves(pods []*api.Pod) - HandlePodReconcile(pods []*api.Pod) - HandlePodSyncs(pods []*api.Pod) + HandlePodAdditions(pods []*v1.Pod) + HandlePodUpdates(pods []*v1.Pod) + HandlePodRemoves(pods []*v1.Pod) + HandlePodReconcile(pods []*v1.Pod) + HandlePodSyncs(pods []*v1.Pod) HandlePodCleanups() error } @@ -330,8 +331,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub MaxContainers: int(kubeCfg.MaxContainerCount), } - daemonEndpoints := &api.NodeDaemonEndpoints{ - KubeletEndpoint: api.DaemonEndpoint{Port: kubeCfg.Port}, + daemonEndpoints := &v1.NodeDaemonEndpoints{ + KubeletEndpoint: v1.DaemonEndpoint{Port: kubeCfg.Port}, } imageGCPolicy := images.ImageGCPolicy{ @@ -373,16 +374,16 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub serviceStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) if kubeClient != nil { - serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", api.NamespaceAll, fields.Everything()) - cache.NewReflector(serviceLW, &api.Service{}, serviceStore, 0).Run() + serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", v1.NamespaceAll, fields.Everything()) + cache.NewReflector(serviceLW, &v1.Service{}, serviceStore, 0).Run() } serviceLister 
:= &cache.StoreToServiceLister{Indexer: serviceStore} nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc) if kubeClient != nil { fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector() - nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", api.NamespaceAll, fieldSelector) - cache.NewReflector(nodeLW, &api.Node{}, nodeStore, 0).Run() + nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fieldSelector) + cache.NewReflector(nodeLW, &v1.Node{}, nodeStore, 0).Run() } nodeLister := &cache.StoreToNodeLister{Store: nodeStore} nodeInfo := &predicates.CachedNodeInfo{StoreToNodeLister: nodeLister} @@ -390,7 +391,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub // TODO: get the real node object of ourself, // and use the real node name and UID. // TODO: what is namespace for node? - nodeRef := &api.ObjectReference{ + nodeRef := &v1.ObjectReference{ Kind: "Node", Name: string(nodeName), UID: types.UID(nodeName), @@ -764,14 +765,14 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub if err != nil { return nil, err } - safeWhitelist, err := sysctl.NewWhitelist(sysctl.SafeSysctlWhitelist(), api.SysctlsPodAnnotationKey) + safeWhitelist, err := sysctl.NewWhitelist(sysctl.SafeSysctlWhitelist(), v1.SysctlsPodAnnotationKey) if err != nil { return nil, err } // Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec // Hence, we concatenate those two lists. safeAndUnsafeSysctls := append(sysctl.SafeSysctlWhitelist(), kubeCfg.AllowedUnsafeSysctls...) - unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, api.UnsafeSysctlsPodAnnotationKey) + unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, v1.UnsafeSysctlsPodAnnotationKey) if err != nil { return nil, err } @@ -803,11 +804,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub } type serviceLister interface { - List(labels.Selector) ([]*api.Service, error) + List(labels.Selector) ([]*v1.Service, error) } type nodeLister interface { - List() (machines api.NodeList, err error) + List() (machines v1.NodeList, err error) } // Kubelet is the main kubelet implementation. @@ -928,7 +929,7 @@ type Kubelet struct { autoDetectCloudProvider bool // Reference to this node. - nodeRef *api.ObjectReference + nodeRef *v1.ObjectReference // Container runtime. containerRuntime kubecontainer.Runtime @@ -1012,7 +1013,7 @@ type Kubelet struct { cpuCFSQuota bool // Information about the ports which are opened by daemons on Node running this Kubelet server. - daemonEndpoints *api.NodeDaemonEndpoints + daemonEndpoints *v1.NodeDaemonEndpoints // A queue used to trigger pod workers. workQueue queue.WorkQueue @@ -1049,7 +1050,7 @@ type Kubelet struct { babysitDaemons bool // handlers called during the tryUpdateNodeStatus cycle - setNodeStatusFuncs []func(*api.Node) error + setNodeStatusFuncs []func(*v1.Node) error // TODO: think about moving this to be centralized in PodWorkers in follow-on. // the list of handlers to call during pod admission. 
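// Illustrative sketch (stand-in Node type): setNodeStatusFuncs above is a slice
// of small mutators applied in order to the node object during each
// tryUpdateNodeStatus cycle, which keeps the status update path data-driven and
// easy to extend. The kubelet's error-handling strategy is not shown in this
// hunk, so stopping at the first error below is an assumption.

package main

import "fmt"

// Node stands in for v1.Node in this sketch.
type Node struct {
	Addresses []string
	Capacity  map[string]string
}

type setNodeStatusFunc func(*Node) error

// setNodeStatus runs every registered mutator and stops at the first error.
func setNodeStatus(node *Node, funcs []setNodeStatusFunc) error {
	for _, f := range funcs {
		if err := f(node); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	funcs := []setNodeStatusFunc{
		func(n *Node) error { n.Addresses = append(n.Addresses, "10.0.0.1"); return nil },
		func(n *Node) error { n.Capacity = map[string]string{"memory": "4Gi"}; return nil },
	}
	node := &Node{}
	if err := setNodeStatus(node, funcs); err != nil {
		fmt.Println("status update failed:", err)
		return
	}
	fmt.Printf("%+v\n", *node)
}
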
@@ -1125,7 +1126,7 @@ func (kl *Kubelet) StartGarbageCollection() { go wait.Until(func() { if err := kl.containerGC.GarbageCollect(kl.sourcesReady.AllReady()); err != nil { glog.Errorf("Container garbage collection failed: %v", err) - kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.ContainerGCFailed, err.Error()) + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error()) loggedContainerGCFailure = true } else { var vLevel glog.Level = 4 @@ -1142,7 +1143,7 @@ func (kl *Kubelet) StartGarbageCollection() { go wait.Until(func() { if err := kl.imageManager.GarbageCollect(); err != nil { glog.Errorf("Image garbage collection failed: %v", err) - kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.ImageGCFailed, err.Error()) + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error()) loggedImageGCFailure = true } else { var vLevel glog.Level = 4 @@ -1223,7 +1224,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { glog.Warning("No api server defined - no node status update will be sent.") } if err := kl.initializeModules(); err != nil { - kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.KubeletSetupFailed, err.Error()) + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error()) glog.Error(err) kl.runtimeState.setInitError(err) } @@ -1265,7 +1266,7 @@ func (kl *Kubelet) GetKubeClient() clientset.Interface { // GetClusterDNS returns a list of the DNS servers and a list of the DNS search // domains of the cluster. -func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { +func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) { var hostDNS, hostSearch []string // Get host DNS settings if kl.resolverConfig != "" { @@ -1280,13 +1281,13 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { return nil, nil, err } } - useClusterFirstPolicy := pod.Spec.DNSPolicy == api.DNSClusterFirst + useClusterFirstPolicy := pod.Spec.DNSPolicy == v1.DNSClusterFirst if useClusterFirstPolicy && kl.clusterDNS == nil { // clusterDNS is not known. // pod with ClusterDNSFirst Policy cannot be created - kl.recorder.Eventf(pod, api.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy) + kl.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy) log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. 
Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod)) - kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, "MissingClusterDNS", log) + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", log) // fallback to DNSDefault useClusterFirstPolicy = false @@ -1331,7 +1332,7 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { // // The workflow is: // * If the pod is being created, record pod worker start latency -// * Call generateAPIPodStatus to prepare an api.PodStatus for the pod +// * Call generateAPIPodStatus to prepare an v1.PodStatus for the pod // * If the pod is being seen as running for the first time, record pod // start latency // * Update the status of the pod in the status manager @@ -1398,7 +1399,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Record the time it takes for the pod to become running. existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID) - if !ok || existingStatus.Phase == api.PodPending && apiPodStatus.Phase == api.PodRunning && + if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning && !firstSeenTime.IsZero() { metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) } @@ -1426,7 +1427,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { kl.statusManager.SetPodStatus(pod, apiPodStatus) // Kill pod if it should not be running - if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == api.PodFailed { + if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == v1.PodFailed { var syncErr error if err := kl.killPod(pod, nil, podStatus, nil); err != nil { syncErr = fmt.Errorf("error killing pod: %v", err) @@ -1478,7 +1479,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // expected to run only once and if the kubelet is restarted then // they are not expected to run again. 
// We don't create and apply updates to cgroup if its a run once pod and was killed above - if !(podKilled && pod.Spec.RestartPolicy == api.RestartPolicyNever) { + if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) { if err := pcm.EnsureExists(pod); err != nil { return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err) } @@ -1517,7 +1518,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Wait for volumes to attach/mount if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil { - kl.recorder.Eventf(pod, api.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err) + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err) glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err) return err } @@ -1548,13 +1549,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { } if egress != nil || ingress != nil { if podUsesHostNetwork(pod) { - kl.recorder.Event(pod, api.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network") + kl.recorder.Event(pod, v1.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network") } else if kl.shaper != nil { if len(apiPodStatus.PodIP) > 0 { err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress) } } else { - kl.recorder.Event(pod, api.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined") + kl.recorder.Event(pod, v1.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined") } } @@ -1564,14 +1565,14 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Get pods which should be resynchronized. Currently, the following pod should be resynchronized: // * pod whose work is ready. // * internal modules that request sync of a pod. -func (kl *Kubelet) getPodsToSync() []*api.Pod { +func (kl *Kubelet) getPodsToSync() []*v1.Pod { allPods := kl.podManager.GetPods() podUIDs := kl.workQueue.GetWork() podUIDSet := sets.NewString() for _, podUID := range podUIDs { podUIDSet.Insert(string(podUID)) } - var podsToSync []*api.Pod + var podsToSync []*v1.Pod for _, pod := range allPods { if podUIDSet.Has(string(pod.UID)) { // The work of the pod is ready @@ -1594,7 +1595,7 @@ func (kl *Kubelet) getPodsToSync() []*api.Pod { // // deletePod returns an error if not all sources are ready or the pod is not // found in the runtime cache. -func (kl *Kubelet) deletePod(pod *api.Pod) error { +func (kl *Kubelet) deletePod(pod *v1.Pod) error { if pod == nil { return fmt.Errorf("deletePod does not allow nil pod") } @@ -1647,10 +1648,10 @@ func (kl *Kubelet) isOutOfDisk() bool { // rejectPod records an event about the pod with the given reason and message, // and updates the pod to the failed phase in the status manage. 
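// Illustrative sketch (stand-in types): getPodsToSync above turns the work
// queue's ready UIDs into a set and then filters the full pod list against it,
// keeping the selection linear in pods plus queued work rather than a nested
// scan. Only the work-queue half of the selection is sketched here; the
// sync-loop-handler check is omitted.

package main

import "fmt"

// pod stands in for *v1.Pod; only the UID matters here.
type pod struct {
	UID  string
	Name string
}

// podsToSync returns the pods whose UIDs the work queue reported as ready.
func podsToSync(allPods []*pod, readyUIDs []string) []*pod {
	ready := make(map[string]struct{}, len(readyUIDs))
	for _, uid := range readyUIDs {
		ready[uid] = struct{}{}
	}
	var results []*pod
	for _, p := range allPods {
		if _, ok := ready[p.UID]; ok {
			results = append(results, p)
		}
	}
	return results
}

func main() {
	all := []*pod{{UID: "a", Name: "web"}, {UID: "b", Name: "db"}, {UID: "c", Name: "cache"}}
	for _, p := range podsToSync(all, []string{"b", "c"}) {
		fmt.Println(p.Name)
	}
}
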
-func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) { - kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message) - kl.statusManager.SetPodStatus(pod, api.PodStatus{ - Phase: api.PodFailed, +func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) { + kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message) + kl.statusManager.SetPodStatus(pod, v1.PodStatus{ + Phase: v1.PodFailed, Reason: reason, Message: "Pod " + message}) } @@ -1660,7 +1661,7 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) { // The function returns a boolean value indicating whether the pod // can be admitted, a brief single-word reason and a message explaining why // the pod cannot be admitted. -func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) { +func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, string) { // the kubelet will invoke each pod admit handler in sequence // if any handler rejects, the pod is rejected. // TODO: move out of disk check into a pod admitter @@ -1681,7 +1682,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str return true, "", "" } -func (kl *Kubelet) canRunPod(pod *api.Pod) lifecycle.PodAdmitResult { +func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult { attrs := &lifecycle.PodAdmitAttributes{Pod: pod} // Get "OtherPods". Rejected pods are failed, so only include admitted pods that are alive. attrs.OtherPods = kl.filterOutTerminatedPods(kl.podManager.GetPods()) @@ -1813,7 +1814,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle // PLEG event for a pod; sync it. if pod, ok := kl.podManager.GetPodByUID(e.ID); ok { glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e) - handler.HandlePodSyncs([]*api.Pod{pod}) + handler.HandlePodSyncs([]*v1.Pod{pod}) } else { // If the pod no longer exists, ignore the event. glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) @@ -1846,7 +1847,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle break } glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod)) - handler.HandlePodSyncs([]*api.Pod{pod}) + handler.HandlePodSyncs([]*v1.Pod{pod}) } case <-housekeepingCh: if !kl.sourcesReady.AllReady() { @@ -1866,7 +1867,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle // dispatchWork starts the asynchronous sync of the pod in a pod worker. // If the pod is terminated, dispatchWork -func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mirrorPod *api.Pod, start time.Time) { +func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mirrorPod *v1.Pod, start time.Time) { if kl.podIsTerminated(pod) { if pod.DeletionTimestamp != nil { // If the pod is in a terminated state, there is no pod worker to @@ -1895,7 +1896,7 @@ func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mi } // TODO: handle mirror pods in a separate component (issue #17251) -func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) { +func (kl *Kubelet) handleMirrorPod(mirrorPod *v1.Pod, start time.Time) { // Mirror pod ADD/UPDATE/DELETE operations are considered an UPDATE to the // corresponding static pod. Send update to the pod worker if the static // pod exists. 
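The canAdmitPod hunk above only changes the pod type; the admission flow itself is a first-rejection-wins chain over the registered admit handlers. A sketch of that shape with stand-in types (PodAdmitHandler/PodAdmitResult here are simplified, not the lifecycle package's definitions):

    package main

    import "fmt"

    type Pod struct{ Name string }

    type PodAdmitResult struct {
        Admit   bool
        Reason  string
        Message string
    }

    type PodAdmitHandler interface {
        Admit(pod *Pod, otherPods []*Pod) PodAdmitResult
    }

    // canAdmitPod invokes each handler in sequence; the first rejection wins and
    // its reason/message are surfaced to the caller.
    func canAdmitPod(handlers []PodAdmitHandler, otherPods []*Pod, pod *Pod) (bool, string, string) {
        for _, h := range handlers {
            if r := h.Admit(pod, otherPods); !r.Admit {
                return false, r.Reason, r.Message
            }
        }
        return true, "", ""
    }

    type rejectAll struct{}

    func (rejectAll) Admit(*Pod, []*Pod) PodAdmitResult {
        return PodAdmitResult{Admit: false, Reason: "OutOfDisk", Message: "node is out of disk"}
    }

    func main() {
        ok, reason, msg := canAdmitPod([]PodAdmitHandler{rejectAll{}}, nil, &Pod{Name: "nginx"})
        fmt.Println(ok, reason, msg) // false OutOfDisk node is out of disk
    }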
@@ -1906,7 +1907,7 @@ func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) { // HandlePodAdditions is the callback in SyncHandler for pods being added from // a config source. -func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) { +func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { start := kl.clock.Now() sort.Sort(sliceutils.PodsByCreationTime(pods)) for _, pod := range pods { @@ -1934,7 +1935,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) { // HandlePodUpdates is the callback in the SyncHandler interface for pods // being updated from a config source. -func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) { +func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { kl.podManager.UpdatePod(pod) @@ -1951,7 +1952,7 @@ func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) { // HandlePodRemoves is the callback in the SyncHandler interface for pods // being removed from a config source. -func (kl *Kubelet) HandlePodRemoves(pods []*api.Pod) { +func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { kl.podManager.DeletePod(pod) @@ -1970,7 +1971,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*api.Pod) { // HandlePodReconcile is the callback in the SyncHandler interface for pods // that should be reconciled. -func (kl *Kubelet) HandlePodReconcile(pods []*api.Pod) { +func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) { for _, pod := range pods { // Update the pod in pod manager, status manager will do periodically reconcile according // to the pod manager. @@ -1987,7 +1988,7 @@ func (kl *Kubelet) HandlePodReconcile(pods []*api.Pod) { // HandlePodSyncs is the callback in the syncHandler interface for pods // that should be dispatched to pod workers for sync. -func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod) { +func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) @@ -2054,7 +2055,7 @@ func (kl *Kubelet) updateRuntimeUp() { // updateCloudProviderFromMachineInfo updates the node's provider ID field // from the given cadvisor machine info. -func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *api.Node, info *cadvisorapi.MachineInfo) { +func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *v1.Node, info *cadvisorapi.MachineInfo) { if info.CloudProvider != cadvisorapi.UnknownProvider && info.CloudProvider != cadvisorapi.Baremetal { // The cloud providers from pkg/cloudprovider/providers/* that update ProviderID @@ -2074,7 +2075,7 @@ func (kl *Kubelet) GetConfiguration() componentconfig.KubeletConfiguration { // BirthCry sends an event that the kubelet has started up. func (kl *Kubelet) BirthCry() { // Make an event that kubelet restarted. - kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, events.StartingKubelet, "Starting kubelet.") + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.") } // StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server. @@ -2117,12 +2118,12 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool { // parseResourceList parses the given configuration map into an API // ResourceList or returns an error. 
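The parseResourceList hunk that follows keeps the same validation rules while switching to v1.ResourceList: only cpu and memory keys are accepted, quantities must parse, and negative values are rejected. A standalone sketch of those rules, using strconv.ParseFloat and a plain map as stand-ins for resource.ParseQuantity and v1.ResourceList:

    package main

    import (
        "fmt"
        "strconv"
    )

    // parseReservation mirrors the parseResourceList rules: only "cpu" and
    // "memory" are reservable, values must parse, and negatives are rejected.
    // strconv.ParseFloat stands in for resource.ParseQuantity here.
    func parseReservation(m map[string]string) (map[string]float64, error) {
        rl := make(map[string]float64)
        for k, v := range m {
            switch k {
            case "cpu", "memory":
                q, err := strconv.ParseFloat(v, 64)
                if err != nil {
                    return nil, err
                }
                if q < 0 {
                    return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
                }
                rl[k] = q
            default:
                return nil, fmt.Errorf("cannot reserve %q resource", k)
            }
        }
        return rl, nil
    }

    func main() {
        rl, err := parseReservation(map[string]string{"cpu": "0.5", "memory": "1073741824"})
        fmt.Println(rl, err)
    }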
-func parseResourceList(m utilconfig.ConfigurationMap) (api.ResourceList, error) { - rl := make(api.ResourceList) +func parseResourceList(m utilconfig.ConfigurationMap) (v1.ResourceList, error) { + rl := make(v1.ResourceList) for k, v := range m { - switch api.ResourceName(k) { + switch v1.ResourceName(k) { // Only CPU and memory resources are supported. - case api.ResourceCPU, api.ResourceMemory: + case v1.ResourceCPU, v1.ResourceMemory: q, err := resource.ParseQuantity(v) if err != nil { return nil, err @@ -2130,7 +2131,7 @@ func parseResourceList(m utilconfig.ConfigurationMap) (api.ResourceList, error) if q.Sign() == -1 { return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v) } - rl[api.ResourceName(k)] = q + rl[v1.ResourceName(k)] = q default: return nil, fmt.Errorf("cannot reserve %q resource", k) } diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index 0eb55d826ca..df0a44fca93 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -24,7 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/cmd/kubelet/app/options" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" @@ -142,21 +142,21 @@ func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string { // GetPods returns all pods bound to the kubelet and their spec, and the mirror // pods. -func (kl *Kubelet) GetPods() []*api.Pod { +func (kl *Kubelet) GetPods() []*v1.Pod { return kl.podManager.GetPods() } // GetRunningPods returns all pods running on kubelet from looking at the // container runtime cache. This function converts kubecontainer.Pod to -// api.Pod, so only the fields that exist in both kubecontainer.Pod and -// api.Pod are considered meaningful. -func (kl *Kubelet) GetRunningPods() ([]*api.Pod, error) { +// v1.Pod, so only the fields that exist in both kubecontainer.Pod and +// v1.Pod are considered meaningful. +func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) { pods, err := kl.runtimeCache.GetPods() if err != nil { return nil, err } - apiPods := make([]*api.Pod, 0, len(pods)) + apiPods := make([]*v1.Pod, 0, len(pods)) for _, pod := range pods { apiPods = append(apiPods, pod.ToAPIPod()) } @@ -165,13 +165,13 @@ func (kl *Kubelet) GetRunningPods() ([]*api.Pod, error) { // GetPodByFullName gets the pod with the given 'full' name, which // incorporates the namespace as well as whether the pod was found. -func (kl *Kubelet) GetPodByFullName(podFullName string) (*api.Pod, bool) { +func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) { return kl.podManager.GetPodByFullName(podFullName) } // GetPodByName provides the first pod that matches namespace and name, as well // as whether the pod was found. -func (kl *Kubelet) GetPodByName(namespace, name string) (*api.Pod, bool) { +func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) { return kl.podManager.GetPodByName(namespace, name) } @@ -187,19 +187,19 @@ func (kl *Kubelet) GetRuntime() kubecontainer.Runtime { } // GetNode returns the node info for the configured node name of this Kubelet. -func (kl *Kubelet) GetNode() (*api.Node, error) { +func (kl *Kubelet) GetNode() (*v1.Node, error) { if kl.standaloneMode { return kl.initialNode() } return kl.nodeInfo.GetNodeInfo(string(kl.nodeName)) } -// getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates(). 
-// The *api.Node is obtained as follows: +// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates(). +// The *v1.Node is obtained as follows: // Return kubelet's nodeInfo for this node, except on error or if in standalone mode, // in which case return a manufactured nodeInfo representing a node with no pods, // zero capacity, and the default labels. -func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) { +func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) { if !kl.standaloneMode { if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil { return n, nil @@ -235,7 +235,7 @@ func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) { // GetExtraSupplementalGroupsForPod returns a list of the extra // supplemental groups for the Pod. These extra supplemental groups come // from annotations on persistent volumes that the pod depends on. -func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 { +func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod) } diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index 8267560841b..db1538969d8 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/kubelet/network" "k8s.io/kubernetes/pkg/util/bandwidth" @@ -146,7 +146,7 @@ func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []s // cleanupBandwidthLimits updates the status of bandwidth-limited containers // and ensures that only the appropriate CIDRs are active on the node. -func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error { +func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error { if kl.shaper == nil { return nil } @@ -174,7 +174,7 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error { } status = kl.generateAPIPodStatus(pod, s) } - if status.Phase == api.PodRunning { + if status.Phase == v1.PodRunning { possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP)) } } diff --git a/pkg/kubelet/kubelet_network_test.go b/pkg/kubelet/kubelet_network_test.go index dbb3fa1cba8..6c36d2d7dd3 100644 --- a/pkg/kubelet/kubelet_network_test.go +++ b/pkg/kubelet/kubelet_network_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/bandwidth" ) @@ -137,7 +137,7 @@ func TestParseResolvConf(t *testing.T) { } func TestCleanupBandwidthLimits(t *testing.T) { - testPod := func(name, ingress string) *api.Pod { + testPod := func(name, ingress string) *v1.Pod { pod := podWithUidNameNs("", name, "") if len(ingress) != 0 { @@ -150,18 +150,18 @@ func TestCleanupBandwidthLimits(t *testing.T) { // TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher // layer status getter function and test that function instead. 
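cleanupBandwidthLimits (diffed above) keeps a shaped CIDR only while some pod with that IP is still in the Running phase; every other CIDR gets reset. A reduced sketch of that reconciliation, with the bandwidth shaper narrowed to a two-method stand-in interface:

    package main

    import "fmt"

    // shaper is a stand-in for the bandwidth shaper: it can list the CIDRs it is
    // currently limiting and reset a single CIDR.
    type shaper interface {
        GetCIDRs() ([]string, error)
        Reset(cidr string) error
    }

    // cleanupCIDRs resets every shaped CIDR that is not backed by a running pod IP.
    func cleanupCIDRs(s shaper, runningPodIPs []string) error {
        possible := make(map[string]bool, len(runningPodIPs))
        for _, ip := range runningPodIPs {
            possible[fmt.Sprintf("%s/32", ip)] = true
        }
        active, err := s.GetCIDRs()
        if err != nil {
            return err
        }
        for _, cidr := range active {
            if !possible[cidr] {
                if err := s.Reset(cidr); err != nil {
                    return err
                }
            }
        }
        return nil
    }

    type fakeShaper struct{ reset []string }

    func (f *fakeShaper) GetCIDRs() ([]string, error) { return []string{"1.2.3.4/32", "5.6.7.8/32"}, nil }
    func (f *fakeShaper) Reset(c string) error        { f.reset = append(f.reset, c); return nil }

    func main() {
        f := &fakeShaper{}
        _ = cleanupCIDRs(f, []string{"1.2.3.4"})
        fmt.Println("reset:", f.reset) // reset: [5.6.7.8/32]
    }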
tests := []struct { - status *api.PodStatus - pods []*api.Pod + status *v1.PodStatus + pods []*v1.Pod inputCIDRs []string expectResetCIDRs []string name string }{ { - status: &api.PodStatus{ + status: &v1.PodStatus{ PodIP: "1.2.3.4", - Phase: api.PodRunning, + Phase: v1.PodRunning, }, - pods: []*api.Pod{ + pods: []*v1.Pod{ testPod("foo", "10M"), testPod("bar", ""), }, @@ -170,11 +170,11 @@ func TestCleanupBandwidthLimits(t *testing.T) { name: "pod running", }, { - status: &api.PodStatus{ + status: &v1.PodStatus{ PodIP: "1.2.3.4", - Phase: api.PodFailed, + Phase: v1.PodFailed, }, - pods: []*api.Pod{ + pods: []*v1.Pod{ testPod("foo", "10M"), testPod("bar", ""), }, @@ -183,11 +183,11 @@ func TestCleanupBandwidthLimits(t *testing.T) { name: "pod not running", }, { - status: &api.PodStatus{ + status: &v1.PodStatus{ PodIP: "1.2.3.4", - Phase: api.PodFailed, + Phase: v1.PodFailed, }, - pods: []*api.Pod{ + pods: []*v1.Pod{ testPod("foo", ""), testPod("bar", ""), }, diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index ec8f52f6ddf..0b353d005c0 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -26,10 +26,10 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/kubelet/cadvisor" @@ -67,7 +67,7 @@ func (kl *Kubelet) registerWithApiServer() { node, err := kl.initialNode() if err != nil { - glog.Errorf("Unable to construct api.Node object for kubelet: %v", err) + glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) continue } @@ -88,7 +88,7 @@ func (kl *Kubelet) registerWithApiServer() { // persistent volumes for the node. If a node of the same name exists but has // a different externalID value, it attempts to delete that node so that a // later attempt can recreate it. -func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool { +func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool { _, err := kl.kubeClient.Core().Nodes().Create(node) if err == nil { return true @@ -142,7 +142,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool { // reconcileCMADAnnotationWithExistingNode reconciles the controller-managed // attach-detach annotation on a new node and the existing node, returning // whether the existing node must be updated. -func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *api.Node) bool { +func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool { var ( existingCMAAnnotation = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation] @@ -169,11 +169,11 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *a return true } -// initialNode constructs the initial api.Node for this Kubelet, incorporating node +// initialNode constructs the initial v1.Node for this Kubelet, incorporating node // labels, information from the cloud provider, and Kubelet configuration. 
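registerWithApiServer (above) builds the initial node object and retries the create with a capped, doubling delay until one attempt succeeds. A simplified sketch of that retry shape, assuming a buildNode/tryRegister pair standing in for initialNode and tryRegisterWithApiServer; the 100ms start and 7s cap are illustrative values, not necessarily the kubelet's exact constants:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    type Node struct{ Name string }

    // registerWithRetry keeps attempting registration with a doubling, capped
    // delay until tryRegister reports success.
    func registerWithRetry(buildNode func() (*Node, error), tryRegister func(*Node) bool) {
        step := 100 * time.Millisecond
        for {
            time.Sleep(step)
            step *= 2
            if step > 7*time.Second {
                step = 7 * time.Second
            }
            node, err := buildNode()
            if err != nil {
                fmt.Println("unable to construct node object:", err)
                continue
            }
            if tryRegister(node) {
                fmt.Println("successfully registered node", node.Name)
                return
            }
        }
    }

    func main() {
        attempts := 0
        registerWithRetry(
            func() (*Node, error) {
                if attempts == 0 {
                    attempts++
                    return nil, errors.New("cloud provider not ready")
                }
                return &Node{Name: "node-1"}, nil
            },
            func(n *Node) bool { return true },
        )
    }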
-func (kl *Kubelet) initialNode() (*api.Node, error) { - node := &api.Node{ - ObjectMeta: api.ObjectMeta{ +func (kl *Kubelet) initialNode() (*v1.Node, error) { + node := &v1.Node{ + ObjectMeta: v1.ObjectMeta{ Name: string(kl.nodeName), Labels: map[string]string{ unversioned.LabelHostname: kl.hostname, @@ -181,15 +181,15 @@ func (kl *Kubelet) initialNode() (*api.Node, error) { unversioned.LabelArch: goRuntime.GOARCH, }, }, - Spec: api.NodeSpec{ + Spec: v1.NodeSpec{ Unschedulable: !kl.registerSchedulable, }, } // Initially, set NodeNetworkUnavailable to true. if kl.providerRequiresNetworkingConfiguration() { - node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{ - Type: api.NodeNetworkUnavailable, - Status: api.ConditionTrue, + node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ + Type: v1.NodeNetworkUnavailable, + Status: v1.ConditionTrue, Reason: "NoRouteCreated", Message: "Node created without a route", LastTransitionTime: unversioned.NewTime(kl.clock.Now()), @@ -320,8 +320,8 @@ func (kl *Kubelet) tryUpdateNodeStatus() error { // field selector for the name of the node (field selectors with // specified name are handled efficiently by apiserver). Once // apiserver supports GET from cache, change it here. - opts := api.ListOptions{ - FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector(), + opts := v1.ListOptions{ + FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector().String(), ResourceVersion: "0", } nodes, err := kl.kubeClient.Core().Nodes().List(opts) @@ -359,7 +359,7 @@ func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) { } // Set IP and hostname addresses for the node. -func (kl *Kubelet) setNodeAddress(node *api.Node) error { +func (kl *Kubelet) setNodeAddress(node *v1.Node) error { if kl.nodeIP != nil { if err := kl.validateNodeIP(); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) @@ -383,9 +383,9 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error { if kl.nodeIP != nil { for _, nodeAddress := range nodeAddresses { if nodeAddress.Address == kl.nodeIP.String() { - node.Status.Addresses = []api.NodeAddress{ + node.Status.Addresses = []v1.NodeAddress{ {Type: nodeAddress.Type, Address: nodeAddress.Address}, - {Type: api.NodeHostName, Address: kl.GetHostname()}, + {Type: v1.NodeHostName, Address: kl.GetHostname()}, } return nil } @@ -395,15 +395,15 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error { // Only add a NodeHostName address if the cloudprovider did not specify one // (we assume the cloudprovider knows best) - var addressNodeHostName *api.NodeAddress + var addressNodeHostName *v1.NodeAddress for i := range nodeAddresses { - if nodeAddresses[i].Type == api.NodeHostName { + if nodeAddresses[i].Type == v1.NodeHostName { addressNodeHostName = &nodeAddresses[i] break } } if addressNodeHostName == nil { - hostnameAddress := api.NodeAddress{Type: api.NodeHostName, Address: kl.GetHostname()} + hostnameAddress := v1.NodeAddress{Type: v1.NodeHostName, Address: kl.GetHostname()} nodeAddresses = append(nodeAddresses, hostnameAddress) } else { glog.V(2).Infof("Using Node Hostname from cloudprovider: %q", addressNodeHostName.Address) @@ -440,21 +440,21 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error { // We tried everything we could, but the IP address wasn't fetchable; error out return fmt.Errorf("can't get ip address of node %s. 
error: %v", node.Name, err) } else { - node.Status.Addresses = []api.NodeAddress{ - {Type: api.NodeLegacyHostIP, Address: ipAddr.String()}, - {Type: api.NodeInternalIP, Address: ipAddr.String()}, - {Type: api.NodeHostName, Address: kl.GetHostname()}, + node.Status.Addresses = []v1.NodeAddress{ + {Type: v1.NodeLegacyHostIP, Address: ipAddr.String()}, + {Type: v1.NodeInternalIP, Address: ipAddr.String()}, + {Type: v1.NodeHostName, Address: kl.GetHostname()}, } } } return nil } -func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) { +func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { // Note: avoid blindly overwriting the capacity in case opaque // resources are being advertised. if node.Status.Capacity == nil { - node.Status.Capacity = api.ResourceList{} + node.Status.Capacity = v1.ResourceList{} } // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start @@ -463,10 +463,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) { if err != nil { // TODO(roberthbailey): This is required for test-cmd.sh to pass. // See if the test should be updated instead. - node.Status.Capacity[api.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI) - node.Status.Capacity[api.ResourceMemory] = resource.MustParse("0Gi") - node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI) - node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI) + node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI) + node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi") + node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI) + node.Status.Capacity[v1.ResourceNvidiaGPU] = *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI) glog.Errorf("Error getting machine info: %v", err) } else { @@ -478,26 +478,26 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) { } if kl.podsPerCore > 0 { - node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity( + node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity( int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI) } else { - node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity( + node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity( int64(kl.maxPods), resource.DecimalSI) } - node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity( + node.Status.Capacity[v1.ResourceNvidiaGPU] = *resource.NewQuantity( int64(kl.nvidiaGPUs), resource.DecimalSI) if node.Status.NodeInfo.BootID != "" && node.Status.NodeInfo.BootID != info.BootID { // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. - kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted, + kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.NodeRebooted, "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID) } node.Status.NodeInfo.BootID = info.BootID } // Set Allocatable. - node.Status.Allocatable = make(api.ResourceList) + node.Status.Allocatable = make(v1.ResourceList) for k, v := range node.Status.Capacity { value := *(v.Copy()) if kl.reservation.System != nil { @@ -515,7 +515,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) { } // Set versioninfo for the node. 
-func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) { +func (kl *Kubelet) setNodeStatusVersionInfo(node *v1.Node) { verinfo, err := kl.cadvisor.VersionInfo() if err != nil { glog.Errorf("Error getting version info: %v", err) @@ -537,14 +537,14 @@ func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) { } // Set daemonEndpoints for the node. -func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *api.Node) { +func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *v1.Node) { node.Status.DaemonEndpoints = *kl.daemonEndpoints } // Set images list for the node -func (kl *Kubelet) setNodeStatusImages(node *api.Node) { +func (kl *Kubelet) setNodeStatusImages(node *v1.Node) { // Update image list of this node - var imagesOnNode []api.ContainerImage + var imagesOnNode []v1.ContainerImage containerImages, err := kl.imageManager.GetImageList() if err != nil { glog.Errorf("Error getting image list: %v", err) @@ -561,7 +561,7 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) { if len(names) > maxNamesPerImageInNodeStatus { names = names[0:maxNamesPerImageInNodeStatus] } - imagesOnNode = append(imagesOnNode, api.ContainerImage{ + imagesOnNode = append(imagesOnNode, v1.ContainerImage{ Names: names, SizeBytes: image.Size, }) @@ -571,13 +571,13 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) { } // Set the GOOS and GOARCH for this node -func (kl *Kubelet) setNodeStatusGoRuntime(node *api.Node) { +func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) { node.Status.NodeInfo.OperatingSystem = goRuntime.GOOS node.Status.NodeInfo.Architecture = goRuntime.GOARCH } // Set status for the node. -func (kl *Kubelet) setNodeStatusInfo(node *api.Node) { +func (kl *Kubelet) setNodeStatusInfo(node *v1.Node) { kl.setNodeStatusMachineInfo(node) kl.setNodeStatusVersionInfo(node) kl.setNodeStatusDaemonEndpoints(node) @@ -586,25 +586,25 @@ func (kl *Kubelet) setNodeStatusInfo(node *api.Node) { } // Set Ready condition for the node. -func (kl *Kubelet) setNodeReadyCondition(node *api.Node) { +func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) { // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions. // This is due to an issue with version skewed kubelet and master components. // ref: https://github.com/kubernetes/kubernetes/issues/16961 currentTime := unversioned.NewTime(kl.clock.Now()) - var newNodeReadyCondition api.NodeCondition + var newNodeReadyCondition v1.NodeCondition rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...) if len(rs) == 0 { - newNodeReadyCondition = api.NodeCondition{ - Type: api.NodeReady, - Status: api.ConditionTrue, + newNodeReadyCondition = v1.NodeCondition{ + Type: v1.NodeReady, + Status: v1.ConditionTrue, Reason: "KubeletReady", Message: "kubelet is posting ready status", LastHeartbeatTime: currentTime, } } else { - newNodeReadyCondition = api.NodeCondition{ - Type: api.NodeReady, - Status: api.ConditionFalse, + newNodeReadyCondition = v1.NodeCondition{ + Type: v1.NodeReady, + Status: v1.ConditionFalse, Reason: "KubeletNotReady", Message: strings.Join(rs, ","), LastHeartbeatTime: currentTime, @@ -613,7 +613,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) { // Append AppArmor status if it's enabled. // TODO(timstclair): This is a temporary message until node feature reporting is added. 
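setNodeReadyCondition (above) derives the Ready condition from the accumulated runtime and network errors: no errors means True/KubeletReady, otherwise False with the errors joined into the message. A stand-in sketch of that derivation (NodeCondition is simplified here and the AppArmor suffix is omitted):

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    type ConditionStatus string

    const (
        ConditionTrue  ConditionStatus = "True"
        ConditionFalse ConditionStatus = "False"
    )

    type NodeCondition struct {
        Type              string
        Status            ConditionStatus
        Reason            string
        Message           string
        LastHeartbeatTime time.Time
    }

    // readyCondition builds the NodeReady condition from the collected runtime
    // and network error strings.
    func readyCondition(errs []string, now time.Time) NodeCondition {
        if len(errs) == 0 {
            return NodeCondition{
                Type:              "Ready",
                Status:            ConditionTrue,
                Reason:            "KubeletReady",
                Message:           "kubelet is posting ready status",
                LastHeartbeatTime: now,
            }
        }
        return NodeCondition{
            Type:              "Ready",
            Status:            ConditionFalse,
            Reason:            "KubeletNotReady",
            Message:           strings.Join(errs, ","),
            LastHeartbeatTime: now,
        }
    }

    func main() {
        c := readyCondition([]string{"container runtime is down"}, time.Now())
        fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
    }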
- if newNodeReadyCondition.Status == api.ConditionTrue && + if newNodeReadyCondition.Status == v1.ConditionTrue && kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil { newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message) } @@ -627,7 +627,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) { readyConditionUpdated := false needToRecordEvent := false for i := range node.Status.Conditions { - if node.Status.Conditions[i].Type == api.NodeReady { + if node.Status.Conditions[i].Type == v1.NodeReady { if node.Status.Conditions[i].Status == newNodeReadyCondition.Status { newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime } else { @@ -644,23 +644,23 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) { node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition) } if needToRecordEvent { - if newNodeReadyCondition.Status == api.ConditionTrue { - kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeReady) + if newNodeReadyCondition.Status == v1.ConditionTrue { + kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeReady) } else { - kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotReady) + kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotReady) } } } // setNodeMemoryPressureCondition for the node. // TODO: this needs to move somewhere centralized... -func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) { +func (kl *Kubelet) setNodeMemoryPressureCondition(node *v1.Node) { currentTime := unversioned.NewTime(kl.clock.Now()) - var condition *api.NodeCondition + var condition *v1.NodeCondition // Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update. for i := range node.Status.Conditions { - if node.Status.Conditions[i].Type == api.NodeMemoryPressure { + if node.Status.Conditions[i].Type == v1.NodeMemoryPressure { condition = &node.Status.Conditions[i] } } @@ -668,9 +668,9 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) { newCondition := false // If the NodeMemoryPressure condition doesn't exist, create one if condition == nil { - condition = &api.NodeCondition{ - Type: api.NodeMemoryPressure, - Status: api.ConditionUnknown, + condition = &v1.NodeCondition{ + Type: v1.NodeMemoryPressure, + Status: v1.ConditionUnknown, } // cannot be appended to node.Status.Conditions here because it gets // copied to the slice. So if we append to the slice here none of the @@ -683,25 +683,25 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) { // Note: The conditions below take care of the case when a new NodeMemoryPressure condition is // created and as well as the case when the condition already exists. When a new condition - // is created its status is set to api.ConditionUnknown which matches either - // condition.Status != api.ConditionTrue or - // condition.Status != api.ConditionFalse in the conditions below depending on whether + // is created its status is set to v1.ConditionUnknown which matches either + // condition.Status != v1.ConditionTrue or + // condition.Status != v1.ConditionFalse in the conditions below depending on whether // the kubelet is under memory pressure or not. 
if kl.evictionManager.IsUnderMemoryPressure() { - if condition.Status != api.ConditionTrue { - condition.Status = api.ConditionTrue + if condition.Status != v1.ConditionTrue { + condition.Status = v1.ConditionTrue condition.Reason = "KubeletHasInsufficientMemory" condition.Message = "kubelet has insufficient memory available" condition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInsufficientMemory") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasInsufficientMemory") } } else { - if condition.Status != api.ConditionFalse { - condition.Status = api.ConditionFalse + if condition.Status != v1.ConditionFalse { + condition.Status = v1.ConditionFalse condition.Reason = "KubeletHasSufficientMemory" condition.Message = "kubelet has sufficient memory available" condition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientMemory") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientMemory") } } @@ -712,13 +712,13 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) { // setNodeDiskPressureCondition for the node. // TODO: this needs to move somewhere centralized... -func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) { +func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) { currentTime := unversioned.NewTime(kl.clock.Now()) - var condition *api.NodeCondition + var condition *v1.NodeCondition // Check if NodeDiskPressure condition already exists and if it does, just pick it up for update. for i := range node.Status.Conditions { - if node.Status.Conditions[i].Type == api.NodeDiskPressure { + if node.Status.Conditions[i].Type == v1.NodeDiskPressure { condition = &node.Status.Conditions[i] } } @@ -726,9 +726,9 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) { newCondition := false // If the NodeDiskPressure condition doesn't exist, create one if condition == nil { - condition = &api.NodeCondition{ - Type: api.NodeDiskPressure, - Status: api.ConditionUnknown, + condition = &v1.NodeCondition{ + Type: v1.NodeDiskPressure, + Status: v1.ConditionUnknown, } // cannot be appended to node.Status.Conditions here because it gets // copied to the slice. So if we append to the slice here none of the @@ -741,25 +741,25 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) { // Note: The conditions below take care of the case when a new NodeDiskressure condition is // created and as well as the case when the condition already exists. When a new condition - // is created its status is set to api.ConditionUnknown which matches either - // condition.Status != api.ConditionTrue or - // condition.Status != api.ConditionFalse in the conditions below depending on whether + // is created its status is set to v1.ConditionUnknown which matches either + // condition.Status != v1.ConditionTrue or + // condition.Status != v1.ConditionFalse in the conditions below depending on whether // the kubelet is under disk pressure or not. 
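The memory-pressure, disk-pressure, and out-of-disk setters above and below all follow the same shape: find the existing condition by type (or create one with Unknown status), flip it only when the observed state disagrees, stamp LastTransitionTime on the flip, and record a node event. A condensed sketch of that shared pattern with simplified types; the reason strings here are illustrative, not the exact ones the kubelet records:

    package main

    import (
        "fmt"
        "time"
    )

    type ConditionStatus string

    const (
        ConditionTrue    ConditionStatus = "True"
        ConditionFalse   ConditionStatus = "False"
        ConditionUnknown ConditionStatus = "Unknown"
    )

    type NodeCondition struct {
        Type               string
        Status             ConditionStatus
        Reason             string
        LastTransitionTime time.Time
    }

    // setPressureCondition updates (or appends) the condition of the given type
    // so that it reflects underPressure, changing LastTransitionTime only when
    // the status actually flips. It returns the new slice and whether an event
    // should be recorded.
    func setPressureCondition(conds []NodeCondition, condType string, underPressure bool, now time.Time) ([]NodeCondition, bool) {
        idx := -1
        for i := range conds {
            if conds[i].Type == condType {
                idx = i
            }
        }
        if idx == -1 {
            // Condition doesn't exist yet; start from Unknown so either branch
            // below counts as a transition.
            conds = append(conds, NodeCondition{Type: condType, Status: ConditionUnknown})
            idx = len(conds) - 1
        }
        want, reason := ConditionFalse, "KubeletHasNo"+condType
        if underPressure {
            want, reason = ConditionTrue, "KubeletHas"+condType
        }
        if conds[idx].Status == want {
            return conds, false // nothing changed, no event
        }
        conds[idx].Status = want
        conds[idx].Reason = reason
        conds[idx].LastTransitionTime = now
        return conds, true
    }

    func main() {
        conds, changed := setPressureCondition(nil, "MemoryPressure", true, time.Now())
        fmt.Println(changed, conds[0].Status, conds[0].Reason)
    }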
if kl.evictionManager.IsUnderDiskPressure() { - if condition.Status != api.ConditionTrue { - condition.Status = api.ConditionTrue + if condition.Status != v1.ConditionTrue { + condition.Status = v1.ConditionTrue condition.Reason = "KubeletHasDiskPressure" condition.Message = "kubelet has disk pressure" condition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasDiskPressure") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure") } } else { - if condition.Status != api.ConditionFalse { - condition.Status = api.ConditionFalse + if condition.Status != v1.ConditionFalse { + condition.Status = v1.ConditionFalse condition.Reason = "KubeletHasNoDiskPressure" condition.Message = "kubelet has no disk pressure" condition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoDiskPressure") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure") } } @@ -769,13 +769,13 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) { } // Set OODcondition for the node. -func (kl *Kubelet) setNodeOODCondition(node *api.Node) { +func (kl *Kubelet) setNodeOODCondition(node *v1.Node) { currentTime := unversioned.NewTime(kl.clock.Now()) - var nodeOODCondition *api.NodeCondition + var nodeOODCondition *v1.NodeCondition // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update. for i := range node.Status.Conditions { - if node.Status.Conditions[i].Type == api.NodeOutOfDisk { + if node.Status.Conditions[i].Type == v1.NodeOutOfDisk { nodeOODCondition = &node.Status.Conditions[i] } } @@ -783,9 +783,9 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) { newOODCondition := false // If the NodeOutOfDisk condition doesn't exist, create one. if nodeOODCondition == nil { - nodeOODCondition = &api.NodeCondition{ - Type: api.NodeOutOfDisk, - Status: api.ConditionUnknown, + nodeOODCondition = &v1.NodeCondition{ + Type: v1.NodeOutOfDisk, + Status: v1.ConditionUnknown, } // nodeOODCondition cannot be appended to node.Status.Conditions here because it gets // copied to the slice. So if we append nodeOODCondition to the slice here none of the @@ -798,29 +798,29 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) { // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is // created and as well as the case when the condition already exists. When a new condition - // is created its status is set to api.ConditionUnknown which matches either - // nodeOODCondition.Status != api.ConditionTrue or - // nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether + // is created its status is set to v1.ConditionUnknown which matches either + // nodeOODCondition.Status != v1.ConditionTrue or + // nodeOODCondition.Status != v1.ConditionFalse in the conditions below depending on whether // the kubelet is out of disk or not. 
if kl.isOutOfDisk() { - if nodeOODCondition.Status != api.ConditionTrue { - nodeOODCondition.Status = api.ConditionTrue + if nodeOODCondition.Status != v1.ConditionTrue { + nodeOODCondition.Status = v1.ConditionTrue nodeOODCondition.Reason = "KubeletOutOfDisk" nodeOODCondition.Message = "out of disk space" nodeOODCondition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeOutOfDisk") } } else { - if nodeOODCondition.Status != api.ConditionFalse { + if nodeOODCondition.Status != v1.ConditionFalse { // Update the out of disk condition when the condition status is unknown even if we // are within the outOfDiskTransitionFrequency duration. We do this to set the // condition status correctly at kubelet startup. - if nodeOODCondition.Status == api.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency { - nodeOODCondition.Status = api.ConditionFalse + if nodeOODCondition.Status == v1.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency { + nodeOODCondition.Status = v1.ConditionFalse nodeOODCondition.Reason = "KubeletHasSufficientDisk" nodeOODCondition.Message = "kubelet has sufficient disk space available" nodeOODCondition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk") + kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientDisk") } else { glog.Infof("Node condition status for OutOfDisk is false, but last transition time is less than %s", kl.outOfDiskTransitionFrequency) } @@ -837,12 +837,12 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) { var oldNodeUnschedulable bool // record if node schedulable change. -func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) { +func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) { if oldNodeUnschedulable != node.Spec.Unschedulable { if node.Spec.Unschedulable { - kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotSchedulable) + kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable) } else { - kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeSchedulable) + kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable) } oldNodeUnschedulable = node.Spec.Unschedulable } @@ -850,7 +850,7 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) { // Update VolumesInUse field in Node Status only after states are synced up at least once // in volume reconciler. -func (kl *Kubelet) setNodeVolumesInUseStatus(node *api.Node) { +func (kl *Kubelet) setNodeVolumesInUseStatus(node *v1.Node) { // Make sure to only update node status after reconciler starts syncing up states if kl.volumeManager.ReconcilerStatesHasBeenSynced() { node.Status.VolumesInUse = kl.volumeManager.GetVolumesInUse() @@ -861,7 +861,7 @@ func (kl *Kubelet) setNodeVolumesInUseStatus(node *api.Node) { // any fields that are currently set. // TODO(madhusudancs): Simplify the logic for setting node conditions and // refactor the node status condition code out to a different file. 
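setNodeStatus, whose diff continues below, just runs an ordered list of setter funcs; defaultNodeStatusFuncs wraps the setters that cannot fail with a small adapter so everything fits the func(*Node) error shape. A sketch of that adapter pattern with a stand-in Node type:

    package main

    import "fmt"

    type Node struct {
        Labels     map[string]string
        Conditions []string
    }

    // withoutError adapts a setter that cannot fail to the func(*Node) error
    // shape used by the setter chain.
    func withoutError(f func(*Node)) func(*Node) error {
        return func(n *Node) error {
            f(n)
            return nil
        }
    }

    // setNodeStatus runs each setter in order and stops at the first error.
    func setNodeStatus(node *Node, setters []func(*Node) error) error {
        for _, f := range setters {
            if err := f(node); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        setters := []func(*Node) error{
            withoutError(func(n *Node) { n.Labels = map[string]string{"kubernetes.io/hostname": "node-1"} }),
            withoutError(func(n *Node) { n.Conditions = append(n.Conditions, "Ready") }),
        }
        node := &Node{}
        if err := setNodeStatus(node, setters); err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Printf("%+v\n", *node)
    }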
-func (kl *Kubelet) setNodeStatus(node *api.Node) error { +func (kl *Kubelet) setNodeStatus(node *v1.Node) error { for _, f := range kl.setNodeStatusFuncs { if err := f(node); err != nil { return err @@ -872,15 +872,15 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { // defaultNodeStatusFuncs is a factory that generates the default set of // setNodeStatus funcs -func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error { +func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { // initial set of node status update handlers, can be modified by Option's - withoutError := func(f func(*api.Node)) func(*api.Node) error { - return func(n *api.Node) error { + withoutError := func(f func(*v1.Node)) func(*v1.Node) error { + return func(n *v1.Node) error { f(n) return nil } } - return []func(*api.Node) error{ + return []func(*v1.Node) error{ kl.setNodeAddress, withoutError(kl.setNodeStatusInfo), withoutError(kl.setNodeOODCondition), @@ -894,7 +894,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error { // SetNodeStatus returns a functional Option that adds the given node status // update handler to the Kubelet -func SetNodeStatus(f func(*api.Node) error) Option { +func SetNodeStatus(f func(*v1.Node) error) Option { return func(k *Kubelet) { k.setNodeStatusFuncs = append(k.setNodeStatusFuncs, f) } diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 16124ce3650..74b25fc6776 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -27,11 +27,11 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" - "k8s.io/kubernetes/pkg/api" apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/testing/core" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" @@ -49,7 +49,7 @@ const ( ) // generateTestingImageList generate randomly generated image list and corresponding expectedImageList. -func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) { +func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) { // imageList is randomly generated image list var imageList []kubecontainer.Image for ; count > 0; count-- { @@ -64,10 +64,10 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []api.Container // expectedImageList is generated by imageList according to size and maxImagesInNodeStatus // 1. sort the imageList by size sort.Sort(sliceutils.ByImageSize(imageList)) - // 2. convert sorted imageList to api.ContainerImage list - var expectedImageList []api.ContainerImage + // 2. 
convert sorted imageList to v1.ContainerImage list + var expectedImageList []v1.ContainerImage for _, kubeImage := range imageList { - apiImage := api.ContainerImage{ + apiImage := v1.ContainerImage{ Names: kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus], SizeBytes: kubeImage.Size, } @@ -96,8 +96,8 @@ func TestUpdateNewNodeStatus(t *testing.T) { t, inputImageList, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient - kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}}, + kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}, }}).ReactionChain machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", @@ -120,45 +120,45 @@ func TestUpdateNewNodeStatus(t *testing.T) { t.Fatalf("can't update disk space manager: %v", err) } - expectedNode := &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{}, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + expectedNode := &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { - Type: api.NodeOutOfDisk, - Status: api.ConditionFalse, + Type: v1.NodeOutOfDisk, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeMemoryPressure, - Status: api.ConditionFalse, + Type: v1.NodeMemoryPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeDiskPressure, - Status: api.ConditionFalse, + Type: v1.NodeDiskPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasNoDiskPressure", Message: fmt.Sprintf("kubelet has no disk pressure"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeReady, - Status: api.ConditionTrue, + Type: v1.NodeReady, + Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, }, - NodeInfo: api.NodeSystemInfo{ + NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", @@ -170,22 +170,22 @@ func TestUpdateNewNodeStatus(t *testing.T) { KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, - Capacity: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Allocatable: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), - 
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Addresses: []api.NodeAddress{ - {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, - {Type: api.NodeInternalIP, Address: "127.0.0.1"}, - {Type: api.NodeHostName, Address: testKubeletHostname}, + Addresses: []v1.NodeAddress{ + {Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, + {Type: v1.NodeHostName, Address: testKubeletHostname}, }, Images: expectedImageList, }, @@ -202,7 +202,7 @@ func TestUpdateNewNodeStatus(t *testing.T) { if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { t.Fatalf("unexpected actions: %v", actions) } - updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node) if !ok { t.Errorf("unexpected object type") } @@ -218,14 +218,14 @@ func TestUpdateNewNodeStatus(t *testing.T) { } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 - if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady { + if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady { t.Errorf("unexpected node condition order. NodeReady should be last.") } if maxImagesInNodeStatus != len(updatedNode.Status.Images) { t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images)) } else { - if !api.Semantic.DeepEqual(expectedNode, updatedNode) { + if !v1.Semantic.DeepEqual(expectedNode, updatedNode) { t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode)) } } @@ -236,8 +236,8 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient - kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}}, + kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}, }}).ReactionChain machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", @@ -262,9 +262,9 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { kubelet.outOfDiskTransitionFrequency = 10 * time.Second - expectedNodeOutOfDiskCondition := api.NodeCondition{ - Type: api.NodeOutOfDisk, - Status: api.ConditionFalse, + expectedNodeOutOfDiskCondition := v1.NodeCondition{ + Type: v1.NodeOutOfDisk, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Time{}, @@ -282,12 +282,12 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { t.Fatalf("unexpected actions: %v", actions) } - updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + updatedNode, ok := 
actions[1].(core.UpdateAction).GetObject().(*v1.Node) if !ok { t.Errorf("unexpected object type") } - var oodCondition api.NodeCondition + var oodCondition v1.NodeCondition for i, cond := range updatedNode.Status.Conditions { if cond.LastHeartbeatTime.IsZero() { t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type) @@ -297,7 +297,7 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { } updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} - if cond.Type == api.NodeOutOfDisk { + if cond.Type == v1.NodeOutOfDisk { oodCondition = updatedNode.Status.Conditions[i] } } @@ -311,54 +311,54 @@ func TestUpdateExistingNodeStatus(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient - kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{}, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { - Type: api.NodeOutOfDisk, - Status: api.ConditionTrue, + Type: v1.NodeOutOfDisk, + Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { - Type: api.NodeMemoryPressure, - Status: api.ConditionFalse, + Type: v1.NodeMemoryPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { - Type: api.NodeDiskPressure, - Status: api.ConditionFalse, + Type: v1.NodeDiskPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { - Type: api.NodeReady, - Status: api.ConditionTrue, + Type: v1.NodeReady, + Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, }, - Capacity: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, - Allocatable: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + Allocatable: v1.ResourceList{ + v1.ResourceCPU: 
*resource.NewMilliQuantity(2800, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, }, }, @@ -384,45 +384,45 @@ func TestUpdateExistingNodeStatus(t *testing.T) { t.Fatalf("can't update disk space manager: %v", err) } - expectedNode := &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{}, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + expectedNode := &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { - Type: api.NodeOutOfDisk, - Status: api.ConditionTrue, + Type: v1.NodeOutOfDisk, + Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.Time{}, // placeholder LastTransitionTime: unversioned.Time{}, // placeholder }, { - Type: api.NodeMemoryPressure, - Status: api.ConditionFalse, + Type: v1.NodeMemoryPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeDiskPressure, - Status: api.ConditionFalse, + Type: v1.NodeDiskPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeReady, - Status: api.ConditionTrue, + Type: v1.NodeReady, + Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.Time{}, // placeholder LastTransitionTime: unversioned.Time{}, // placeholder }, }, - NodeInfo: api.NodeSystemInfo{ + NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", @@ -434,25 +434,25 @@ func TestUpdateExistingNodeStatus(t *testing.T) { KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, - Capacity: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Allocatable: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Addresses: []api.NodeAddress{ - {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, - {Type: api.NodeInternalIP, Address: "127.0.0.1"}, - {Type: api.NodeHostName, 
Address: testKubeletHostname}, + Addresses: []v1.NodeAddress{ + {Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, + {Type: v1.NodeHostName, Address: testKubeletHostname}, }, // images will be sorted from max to min in node status. - Images: []api.ContainerImage{ + Images: []v1.ContainerImage{ { Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, SizeBytes: 456, @@ -477,7 +477,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) { if !ok { t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1]) } - updatedNode, ok := updateAction.GetObject().(*api.Node) + updatedNode, ok := updateAction.GetObject().(*v1.Node) if !ok { t.Errorf("unexpected object type") } @@ -494,11 +494,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) { } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 - if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady { + if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady { t.Errorf("unexpected node condition order. NodeReady should be last.") } - if !api.Semantic.DeepEqual(expectedNode, updatedNode) { + if !v1.Semantic.DeepEqual(expectedNode, updatedNode) { t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode)) } } @@ -508,23 +508,23 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) kubelet := testKubelet.kubelet clock := testKubelet.fakeClock kubeClient := testKubelet.fakeKubeClient - kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{}, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { - Type: api.NodeReady, - Status: api.ConditionTrue, + Type: v1.NodeReady, + Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.NewTime(clock.Now()), LastTransitionTime: unversioned.NewTime(clock.Now()), }, { - Type: api.NodeOutOfDisk, - Status: api.ConditionTrue, + Type: v1.NodeOutOfDisk, + Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.NewTime(clock.Now()), @@ -558,17 +558,17 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) kubelet.outOfDiskTransitionFrequency = 5 * time.Second - ood := api.NodeCondition{ - Type: api.NodeOutOfDisk, - Status: api.ConditionTrue, + ood := v1.NodeCondition{ + Type: v1.NodeOutOfDisk, + Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder } - noOod := api.NodeCondition{ - Type: api.NodeOutOfDisk, - Status: api.ConditionFalse, + noOod := v1.NodeCondition{ + Type: v1.NodeOutOfDisk, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder @@ -578,7 +578,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) testCases := []struct { 
rootFsAvail uint64 dockerFsAvail uint64 - expected api.NodeCondition + expected v1.NodeCondition }{ { // NodeOutOfDisk==false @@ -640,15 +640,15 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) if !ok { t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1]) } - updatedNode, ok := updateAction.GetObject().(*api.Node) + updatedNode, ok := updateAction.GetObject().(*v1.Node) if !ok { t.Errorf("%d. unexpected object type", tcIdx) } kubeClient.ClearActions() - var oodCondition api.NodeCondition + var oodCondition v1.NodeCondition for i, cond := range updatedNode.Status.Conditions { - if cond.Type == api.NodeOutOfDisk { + if cond.Type == v1.NodeOutOfDisk { oodCondition = updatedNode.Status.Conditions[i] } } @@ -665,8 +665,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { kubelet := testKubelet.kubelet clock := testKubelet.fakeClock kubeClient := testKubelet.fakeKubeClient - kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}}, + kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}, }}).ReactionChain mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) @@ -689,30 +689,30 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { t.Fatalf("can't update disk space manager: %v", err) } - expectedNode := &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{}, - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + expectedNode := &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { - Type: api.NodeOutOfDisk, - Status: api.ConditionFalse, + Type: v1.NodeOutOfDisk, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: "kubelet has sufficient disk space available", LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeMemoryPressure, - Status: api.ConditionFalse, + Type: v1.NodeMemoryPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { - Type: api.NodeDiskPressure, - Status: api.ConditionFalse, + Type: v1.NodeDiskPressure, + Status: v1.ConditionFalse, Reason: "KubeletHasNoDiskPressure", Message: fmt.Sprintf("kubelet has no disk pressure"), LastHeartbeatTime: unversioned.Time{}, @@ -720,7 +720,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, {}, //placeholder }, - NodeInfo: api.NodeSystemInfo{ + NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", @@ -732,24 +732,24 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, - Capacity: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + 
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Allocatable: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, - Addresses: []api.NodeAddress{ - {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, - {Type: api.NodeInternalIP, Address: "127.0.0.1"}, - {Type: api.NodeHostName, Address: testKubeletHostname}, + Addresses: []v1.NodeAddress{ + {Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, + {Type: v1.NodeHostName, Address: testKubeletHostname}, }, - Images: []api.ContainerImage{ + Images: []v1.ContainerImage{ { Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, SizeBytes: 456, @@ -762,7 +762,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, } - checkNodeStatus := func(status api.ConditionStatus, reason string) { + checkNodeStatus := func(status v1.ConditionStatus, reason string) { kubeClient.ClearActions() if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) @@ -774,7 +774,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { t.Fatalf("unexpected actions: %v", actions) } - updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node) if !ok { t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1]) } @@ -792,21 +792,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 lastIndex := len(updatedNode.Status.Conditions) - 1 - if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady { + if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady { t.Errorf("unexpected node condition order. 
NodeReady should be last.") } if updatedNode.Status.Conditions[lastIndex].Message == "" { t.Errorf("unexpected empty condition message") } updatedNode.Status.Conditions[lastIndex].Message = "" - expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{ - Type: api.NodeReady, + expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{ + Type: v1.NodeReady, Status: status, Reason: reason, LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, } - if !api.Semantic.DeepEqual(expectedNode, updatedNode) { + if !v1.Semantic.DeepEqual(expectedNode, updatedNode) { t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode)) } } @@ -815,17 +815,17 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { // Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report kubelet ready if the runtime check is updated clock.SetTime(time.Now()) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionTrue, "KubeletReady") + checkNodeStatus(v1.ConditionTrue, "KubeletReady") // Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report kubelet not ready if the runtime check failed fakeRuntime := testKubelet.fakeRuntime @@ -833,7 +833,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error") clock.SetTime(time.Now()) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Test cri integration. kubelet.kubeletConfiguration.EnableCRI = true @@ -842,12 +842,12 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { // Should report node not ready if runtime status is nil. fakeRuntime.RuntimeStatus = nil kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node not ready if runtime status is empty. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{} kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node not ready if RuntimeReady is false. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ @@ -857,7 +857,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, } kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node ready if RuntimeReady is true. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ @@ -867,7 +867,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, } kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionTrue, "KubeletReady") + checkNodeStatus(v1.ConditionTrue, "KubeletReady") // Should report node not ready if NetworkReady is false. 
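// Illustration only, not part of this patch: the expected-node fixtures and the
// checkNodeStatus helper above now build plain v1 structs. A minimal sketch of
// constructing the NodeReady condition the way these tests expect it, assuming
// the "k8s.io/kubernetes/pkg/api/v1" and "k8s.io/kubernetes/pkg/api/unversioned"
// imports used throughout this patch; the helper name is invented for the example.
func sketchReadyCondition(ready bool) v1.NodeCondition {
	status, reason := v1.ConditionFalse, "KubeletNotReady"
	if ready {
		status, reason = v1.ConditionTrue, "KubeletReady"
	}
	return v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             status,
		Reason:             reason,
		LastHeartbeatTime:  unversioned.Time{}, // zero-value placeholder, as in the fixtures
		LastTransitionTime: unversioned.Time{},
	}
}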
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ @@ -877,14 +877,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, } kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady") + checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") } func TestUpdateNodeStatusError(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet // No matching node for the kubelet - testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain + testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain if err := kubelet.updateNodeStatus(); err == nil { t.Errorf("unexpected non error: %v", err) @@ -900,15 +900,15 @@ func TestRegisterWithApiServer(t *testing.T) { kubeClient := testKubelet.fakeKubeClient kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) { // Return an error on create. - return true, &api.Node{}, &apierrors.StatusError{ + return true, &v1.Node{}, &apierrors.StatusError{ ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists}, } }) kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { // Return an existing (matching) node on get. - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + return true, &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{ExternalID: testKubeletHostname}, }, nil }) kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { @@ -961,10 +961,10 @@ func TestTryRegisterWithApiServer(t *testing.T) { ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonConflict}, } - newNode := func(cmad bool, externalID string) *api.Node { - node := &api.Node{ - ObjectMeta: api.ObjectMeta{}, - Spec: api.NodeSpec{ + newNode := func(cmad bool, externalID string) *v1.Node { + node := &v1.Node{ + ObjectMeta: v1.ObjectMeta{}, + Spec: v1.NodeSpec{ ExternalID: externalID, }, } @@ -979,8 +979,8 @@ func TestTryRegisterWithApiServer(t *testing.T) { cases := []struct { name string - newNode *api.Node - existingNode *api.Node + newNode *v1.Node + existingNode *v1.Node createError error getError error updateError error @@ -993,7 +993,7 @@ func TestTryRegisterWithApiServer(t *testing.T) { }{ { name: "success case - new node", - newNode: &api.Node{}, + newNode: &v1.Node{}, expectedResult: true, expectedActions: 1, }, @@ -1111,23 +1111,23 @@ func TestTryRegisterWithApiServer(t *testing.T) { } if tc.testSavedNode { - var savedNode *api.Node + var savedNode *v1.Node var ok bool t.Logf("actions: %v: %+v", len(actions), actions) action := actions[tc.savedNodeIndex] if action.GetVerb() == "create" { createAction := action.(core.CreateAction) - savedNode, ok = createAction.GetObject().(*api.Node) + savedNode, ok = createAction.GetObject().(*v1.Node) if !ok { - t.Errorf("%v: unexpected type; couldn't convert to *api.Node: %+v", tc.name, createAction.GetObject()) + t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, createAction.GetObject()) continue } } else if action.GetVerb() == "update" { updateAction := action.(core.UpdateAction) - savedNode, ok = updateAction.GetObject().(*api.Node) + savedNode, ok = updateAction.GetObject().(*v1.Node) if !ok { - t.Errorf("%v: unexpected type; couldn't convert to 
*api.Node: %+v", tc.name, updateAction.GetObject()) + t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, updateAction.GetObject()) continue } } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index c56f25745ef..d8b1f17012e 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -33,9 +33,10 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - utilpod "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/v1" + utilpod "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/api/v1/validation" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -72,7 +73,7 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) { } // getActivePods returns non-terminal pods -func (kl *Kubelet) getActivePods() []*api.Pod { +func (kl *Kubelet) getActivePods() []*v1.Pod { allPods := kl.podManager.GetPods() activePods := kl.filterOutTerminatedPods(allPods) return activePods @@ -82,7 +83,7 @@ func (kl *Kubelet) getActivePods() []*api.Pod { // Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for // (we only support one device per node). // TODO: add support for more than 1 GPU after #28216. -func makeDevices(container *api.Container) []kubecontainer.DeviceInfo { +func makeDevices(container *v1.Container) []kubecontainer.DeviceInfo { nvidiaGPULimit := container.Resources.Limits.NvidiaGPU() if nvidiaGPULimit.Value() != 0 { return []kubecontainer.DeviceInfo{ @@ -96,14 +97,14 @@ func makeDevices(container *api.Container) []kubecontainer.DeviceInfo { } // makeMounts determines the mount points for the given container. -func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) { +func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) { // Kubernetes only mounts on /etc/hosts if : // - container does not use hostNetwork and // - container is not an infrastructure(pause) container // - container is not already mounting on /etc/hosts // When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set. 
// OS is not Windows - mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows" + mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows" glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) mounts := []kubecontainer.Mount{} for _, mount := range container.VolumeMounts { @@ -198,7 +199,7 @@ func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error { return ioutil.WriteFile(fileName, buffer.Bytes(), 0644) } -func makePortMappings(container *api.Container) (ports []kubecontainer.PortMapping) { +func makePortMappings(container *v1.Container) (ports []kubecontainer.PortMapping) { names := make(map[string]struct{}) for _, p := range container.Ports { pm := kubecontainer.PortMapping{ @@ -248,7 +249,7 @@ func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) { // GeneratePodHostNameAndDomain creates a hostname and domain name for a pod, // given that pod's spec and annotations or returns an error. -func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { +func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { // TODO(vmarmol): Handle better. clusterDomain := kl.clusterDomain podAnnotations := pod.Annotations @@ -290,7 +291,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, e // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. -func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { +func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { var err error pcm := kl.containerManager.NewPodContainerManager() _, podContainerName := pcm.GetPodContainerName(pod) @@ -345,7 +346,7 @@ var masterServices = sets.NewString("kubernetes") // pod in namespace ns should see. func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { var ( - serviceMap = make(map[string]*api.Service) + serviceMap = make(map[string]*v1.Service) m = make(map[string]string) ) @@ -364,7 +365,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { for i := range services { service := services[i] // ignore services where ClusterIP is "None" or empty - if !api.IsServiceIPSet(service) { + if !v1.IsServiceIPSet(service) { continue } serviceName := service.Name @@ -385,7 +386,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { } } - mappedServices := []*api.Service{} + mappedServices := []*v1.Service{} for key := range serviceMap { mappedServices = append(mappedServices, serviceMap[key]) } @@ -397,11 +398,11 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { } // Make the environment variables for a pod in the given namespace. 
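// Illustration only, not part of this patch: makeEnvironmentVariables (below)
// layers container env vars on top of the service environment gathered by
// getServiceEnvVarMap above, which skips services without a usable ClusterIP via
// the v1 helper this patch switches to. A sketch of that filter step, assuming
// the "k8s.io/kubernetes/pkg/api/v1" import; the function name is invented.
func sketchVisibleServices(services []*v1.Service) []*v1.Service {
	var visible []*v1.Service
	for _, svc := range services {
		if !v1.IsServiceIPSet(svc) { // ClusterIP is "None" or empty
			continue
		}
		visible = append(visible, svc)
	}
	return visible
}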
-func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Container, podIP string) ([]kubecontainer.EnvVar, error) { +func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) { var result []kubecontainer.EnvVar // Note: These are added to the docker Config, but are not included in the checksum computed // by dockertools.BuildDockerName(...). That way, we can still determine whether an - // api.Container is already running by its hash. (We don't want to restart a container just + // v1.Container is already running by its hash. (We don't want to restart a container just // because some service changed.) // // Note that there is a race between Kubelet seeing the pod and kubelet seeing the service. @@ -424,8 +425,8 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain // 3. Add remaining service environment vars var ( tmpEnv = make(map[string]string) - configMaps = make(map[string]*api.ConfigMap) - secrets = make(map[string]*api.Secret) + configMaps = make(map[string]*v1.ConfigMap) + secrets = make(map[string]*v1.Secret) mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv) ) for _, envVar := range container.Env { @@ -510,7 +511,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain // podFieldSelectorRuntimeValue returns the runtime value of the given // selector for a pod. -func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod *api.Pod, podIP string) (string, error) { +func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) { internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") if err != nil { return "", err @@ -527,7 +528,7 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod } // containerResourceRuntimeValue returns the value of the provided container resource -func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, container *api.Container) (string, error) { +func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) { containerName := fs.ContainerName if len(containerName) == 0 { return fieldpath.ExtractContainerResourceValue(fs, container) @@ -538,7 +539,7 @@ func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, // One of the following arguments must be non-nil: runningPod, status. // TODO: Modify containerRuntime.KillPod() to accept the right arguments. -func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error { +func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error { var p kubecontainer.Pod if runningPod != nil { p = *runningPod @@ -577,7 +578,7 @@ func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status * } // makePodDataDirs creates the dirs for the pod datas. -func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error { +func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error { uid := pod.UID if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) { return err @@ -592,8 +593,8 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error { } // returns whether the pod uses the host network namespace. 
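// Illustration only, not part of this patch: in the v1 types the host-network
// flag is a field directly on PodSpec (the internal types kept it on
// PodSpec.SecurityContext), which is why makeMounts earlier in this file and
// podUsesHostNetwork just below now read pod.Spec.HostNetwork. A minimal sketch
// with the v1 types used throughout this patch; the function name is invented.
func sketchUsesHostNetwork() bool {
	p := &v1.Pod{Spec: v1.PodSpec{HostNetwork: true}} // no SecurityContext indirection needed
	return p.Spec.HostNetwork
}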
-func podUsesHostNetwork(pod *api.Pod) bool { - return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork +func podUsesHostNetwork(pod *v1.Pod) bool { + return pod.Spec.HostNetwork } // getPullSecretsForPod inspects the Pod and retrieves the referenced pull @@ -601,8 +602,8 @@ func podUsesHostNetwork(pod *api.Pod) bool { // TODO: duplicate secrets are being retrieved multiple times and there // is no cache. Creating and using a secret manager interface will make this // easier to address. -func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) { - pullSecrets := []api.Secret{} +func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) { + pullSecrets := []v1.Secret{} for _, secretRef := range pod.Spec.ImagePullSecrets { secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name) @@ -618,8 +619,8 @@ func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) { } // Returns true if pod is in the terminated state ("Failed" or "Succeeded"). -func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { - var status api.PodStatus +func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { + var status v1.PodStatus // Check the cached pod status which was set after the last sync. status, ok := kl.statusManager.GetPodStatus(pod.UID) if !ok { @@ -628,7 +629,7 @@ func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { // restarted. status = pod.Status } - if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { + if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded { return true } @@ -637,8 +638,8 @@ func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { // filterOutTerminatedPods returns the given pods which the status manager // does not consider failed or succeeded. -func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod { - var filteredPods []*api.Pod +func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod { + var filteredPods []*v1.Pod for _, p := range pods { if kl.podIsTerminated(p) { continue @@ -650,7 +651,7 @@ func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod { // removeOrphanedPodStatuses removes obsolete entries in podStatus where // the pod is no longer considered bound to this node. -func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) { +func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) { podUIDs := make(map[types.UID]bool) for _, pod := range pods { podUIDs[pod.UID] = true @@ -778,7 +779,7 @@ func (kl *Kubelet) podKiller() { break } killing.Insert(string(runningPod.ID)) - go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) { + go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) { defer func() { ch <- runningPod.ID }() @@ -796,7 +797,7 @@ func (kl *Kubelet) podKiller() { } // checkHostPortConflicts detects pods with conflicted host ports. -func hasHostPortConflicts(pods []*api.Pod) bool { +func hasHostPortConflicts(pods []*v1.Pod) bool { ports := sets.String{} for _, pod := range pods { if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 { @@ -815,13 +816,13 @@ func hasHostPortConflicts(pods []*api.Pod) bool { // of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current // running container is preferred over a previous termination. 
If info about the container is not available then a specific // error is returned to the end user. -func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { +func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { var cID string - cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) + cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName) // if not found, check the init containers if !found { - cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName) + cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName) } if !found { return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName) @@ -866,7 +867,7 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.Pod // GetKubeletContainerLogs returns logs from the container // TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt // or all of them. -func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { +func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { // Pod workers periodically write status to statusManager. If status is not // cached there, something is wrong (or kubelet just restarted and hasn't // caught up yet). Just assume the pod is not ready yet. @@ -914,12 +915,12 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo // GetPhase returns the phase of a pod given its container info. // This func is exported to simplify integration with 3rd party kubelet // integrations like kubernetes-mesos. 
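// Illustration only, not part of this patch: validateContainerLogStatus above
// looks a container up by name with v1.GetContainerStatus and falls back to the
// init-container list, and GetPhase below uses the same helper per container.
// A usage sketch, assuming the v1 import used in this file; the helper name is
// invented for the example.
func sketchLookupStatus(ps *v1.PodStatus, name string) (v1.ContainerStatus, bool) {
	if cs, found := v1.GetContainerStatus(ps.ContainerStatuses, name); found {
		return cs, true
	}
	return v1.GetContainerStatus(ps.InitContainerStatuses, name)
}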
-func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { +func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { initialized := 0 pendingInitialization := 0 failedInitialization := 0 for _, container := range spec.InitContainers { - containerStatus, ok := api.GetContainerStatus(info, container.Name) + containerStatus, ok := v1.GetContainerStatus(info, container.Name) if !ok { pendingInitialization++ continue @@ -956,7 +957,7 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { failed := 0 succeeded := 0 for _, container := range spec.Containers { - containerStatus, ok := api.GetContainerStatus(info, container.Name) + containerStatus, ok := v1.GetContainerStatus(info, container.Name) if !ok { unknown++ continue @@ -983,8 +984,8 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { } } - if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever { - return api.PodFailed + if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever { + return v1.PodFailed } switch { @@ -993,46 +994,46 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { case waiting > 0: glog.V(5).Infof("pod waiting > 0, pending") // One or more containers has not been started - return api.PodPending + return v1.PodPending case running > 0 && unknown == 0: // All containers have been started, and at least // one container is running - return api.PodRunning + return v1.PodRunning case running == 0 && stopped > 0 && unknown == 0: // All containers are terminated - if spec.RestartPolicy == api.RestartPolicyAlways { + if spec.RestartPolicy == v1.RestartPolicyAlways { // All containers are in the process of restarting - return api.PodRunning + return v1.PodRunning } if stopped == succeeded { // RestartPolicy is not Always, and all // containers are terminated in success - return api.PodSucceeded + return v1.PodSucceeded } - if spec.RestartPolicy == api.RestartPolicyNever { + if spec.RestartPolicy == v1.RestartPolicyNever { // RestartPolicy is Never, and all containers are // terminated with at least one in failure - return api.PodFailed + return v1.PodFailed } // RestartPolicy is OnFailure, and at least one in failure // and in the process of restarting - return api.PodRunning + return v1.PodRunning default: glog.V(5).Infof("pod default case, pending") - return api.PodPending + return v1.PodPending } } // generateAPIPodStatus creates the final API pod status for a pod, given the // internal pod status. -func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { +func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { glog.V(3).Infof("Generating status for %q", format.Pod(pod)) // check if an internal module has requested the pod is evicted. for _, podSyncHandler := range kl.PodSyncHandlers { if result := podSyncHandler.ShouldEvict(pod); result.Evict { - return api.PodStatus{ - Phase: api.PodFailed, + return v1.PodStatus{ + Phase: v1.PodFailed, Reason: result.Reason, Message: result.Message, } @@ -1043,7 +1044,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P // Assume info is ready to process spec := &pod.Spec - allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) + allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) 
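// Illustration only, not part of this patch: the call that follows feeds GetPhase
// the combined regular and init container statuses, and the restart policy decides
// the aggregate phase (for example, all containers terminated under
// RestartPolicyAlways still reports PodRunning, as the table tests later in this
// patch expect). A compressed usage sketch; the function name is invented.
func sketchAggregatePhase(pod *v1.Pod, s *v1.PodStatus) v1.PodPhase {
	all := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
	return GetPhase(&pod.Spec, all)
}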
s.Phase = GetPhase(spec, allStatus) kl.probeManager.UpdatePodStatus(pod.UID, s) s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) @@ -1051,12 +1052,12 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus() // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure // it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true. - if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil { + if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil { s.Conditions = append(s.Conditions, *oldPodScheduled) } - api.UpdatePodCondition(&pod.Status, &api.PodCondition{ - Type: api.PodScheduled, - Status: api.ConditionTrue, + v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionTrue, }) if !kl.standaloneMode { @@ -1077,8 +1078,8 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P // convertStatusToAPIStatus creates an api PodStatus for the given pod from // the given internal pod status. It is purely transformative and does not // alter the kubelet state at all. -func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus { - var apiPodStatus api.PodStatus +func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus { + var apiPodStatus v1.PodStatus apiPodStatus.PodIP = podStatus.IP apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( @@ -1101,10 +1102,10 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain // convertToAPIContainerStatuses converts the given internal container // statuses into API container statuses. 
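// Illustration only, not part of this patch: the conversion below is, at its
// core, a switch on the internal container state mapped onto the v1 state
// structs. A trimmed sketch of that mapping (IDs and several fields dropped for
// brevity; see the full convertContainerStatus closure below), assuming the
// kubecontainer, v1 and unversioned imports used in this file.
func sketchContainerState(cs *kubecontainer.ContainerStatus) v1.ContainerState {
	switch cs.State {
	case kubecontainer.ContainerStateRunning:
		return v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)}}
	case kubecontainer.ContainerStateExited:
		return v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
			ExitCode: int32(cs.ExitCode),
			Reason:   cs.Reason,
			Message:  cs.Message,
		}}
	default:
		return v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}
	}
}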
-func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus { - convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus { +func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus { + convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus { cid := cs.ID.String() - status := &api.ContainerStatus{ + status := &v1.ContainerStatus{ Name: cs.Name, RestartCount: int32(cs.RestartCount), Image: cs.Image, @@ -1113,9 +1114,9 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco } switch cs.State { case kubecontainer.ContainerStateRunning: - status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} + status.State.Running = &v1.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} case kubecontainer.ContainerStateExited: - status.State.Terminated = &api.ContainerStateTerminated{ + status.State.Terminated = &v1.ContainerStateTerminated{ ExitCode: int32(cs.ExitCode), Reason: cs.Reason, Message: cs.Message, @@ -1124,26 +1125,26 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco ContainerID: cid, } default: - status.State.Waiting = &api.ContainerStateWaiting{} + status.State.Waiting = &v1.ContainerStateWaiting{} } return status } // Fetch old containers statuses from old pod status. - oldStatuses := make(map[string]api.ContainerStatus, len(containers)) + oldStatuses := make(map[string]v1.ContainerStatus, len(containers)) for _, status := range previousStatus { oldStatuses[status.Name] = status } // Set all container statuses to default waiting state - statuses := make(map[string]*api.ContainerStatus, len(containers)) - defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}} + statuses := make(map[string]*v1.ContainerStatus, len(containers)) + defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"}} if hasInitContainers { - defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}} + defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "PodInitializing"}} } for _, container := range containers { - status := &api.ContainerStatus{ + status := &v1.ContainerStatus{ Name: container.Name, Image: container.Image, State: defaultWaitingState, @@ -1206,8 +1207,8 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco if status.State.Terminated != nil { status.LastTerminationState = status.State } - status.State = api.ContainerState{ - Waiting: &api.ContainerStateWaiting{ + status.State = v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ Reason: reason.Error(), Message: message, }, @@ -1215,7 +1216,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco statuses[container.Name] = status } - var containerStatuses []api.ContainerStatus + var containerStatuses []v1.ContainerStatus for _, status := range statuses { containerStatuses = append(containerStatuses, *status) } @@ -1386,7 +1387,7 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID // 
running and whose volumes have been cleaned up. func (kl *Kubelet) cleanupOrphanedPodCgroups( cgroupPods map[types.UID]cm.CgroupName, - pods []*api.Pod, runningPods []*kubecontainer.Pod) error { + pods []*v1.Pod, runningPods []*kubecontainer.Pod) error { // Add all running and existing terminated pods to a set allPods allPods := sets.NewString() for _, pod := range pods { @@ -1426,7 +1427,7 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups( // NOTE: when if a container shares any namespace with another container it must also share the user namespace // or it will not have the correct capabilities in the namespace. This means that host user namespace // is enabled per pod, not per container. -func (kl *Kubelet) enableHostUserNamespace(pod *api.Pod) bool { +func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool { if hasPrivilegedContainer(pod) || hasHostNamespace(pod) || hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) { return true @@ -1435,7 +1436,7 @@ func (kl *Kubelet) enableHostUserNamespace(pod *api.Pod) bool { } // hasPrivilegedContainer returns true if any of the containers in the pod are privileged. -func hasPrivilegedContainer(pod *api.Pod) bool { +func hasPrivilegedContainer(pod *v1.Pod) bool { for _, c := range pod.Spec.Containers { if c.SecurityContext != nil && c.SecurityContext.Privileged != nil && @@ -1447,7 +1448,7 @@ func hasPrivilegedContainer(pod *api.Pod) bool { } // hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container. -func hasNonNamespacedCapability(pod *api.Pod) bool { +func hasNonNamespacedCapability(pod *v1.Pod) bool { for _, c := range pod.Spec.Containers { if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil { for _, cap := range c.SecurityContext.Capabilities.Add { @@ -1462,7 +1463,7 @@ func hasNonNamespacedCapability(pod *api.Pod) bool { } // hasHostVolume returns true if the pod spec has a HostPath volume. -func hasHostVolume(pod *api.Pod) bool { +func hasHostVolume(pod *v1.Pod) bool { for _, v := range pod.Spec.Volumes { if v.HostPath != nil { return true @@ -1472,15 +1473,15 @@ func hasHostVolume(pod *api.Pod) bool { } // hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true. -func hasHostNamespace(pod *api.Pod) bool { +func hasHostNamespace(pod *v1.Pod) bool { if pod.Spec.SecurityContext == nil { return false } - return pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostNetwork || pod.Spec.SecurityContext.HostPID + return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID } // hasHostMountPVC returns true if a PVC is referencing a HostPath volume. 
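// Hedged sketch, not a verbatim copy of the body below: hasHostMountPVC has to
// resolve each claim to its bound PersistentVolume through the API before it can
// check for a HostPath source. Treat the PersistentVolumes getter and the
// Spec.VolumeName / Spec.HostPath accesses here as assumptions about the
// release-1.5 client and v1 types rather than text taken from this patch; the
// function name is invented and error handling is trimmed.
func sketchClaimUsesHostPath(kl *Kubelet, namespace, claimName string) bool {
	pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
	if err != nil || pvc == nil {
		return false
	}
	pv, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName) // assumed lookup
	if err != nil || pv == nil {
		return false
	}
	return pv.Spec.HostPath != nil
}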
-func (kl *Kubelet) hasHostMountPVC(pod *api.Pod) bool { +func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil { pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName) diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index b38372e4c35..7cac9533835 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -26,8 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/client/testing/core" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -39,8 +39,8 @@ import ( ) func TestMakeMounts(t *testing.T) { - container := api.Container{ - VolumeMounts: []api.VolumeMount{ + container := v1.Container{ + VolumeMounts: []v1.VolumeMount{ { MountPath: "/etc/hosts", Name: "disk", @@ -70,11 +70,9 @@ func TestMakeMounts(t *testing.T) { "disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}}, } - pod := api.Pod{ - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ - HostNetwork: true, - }, + pod := v1.Pod{ + Spec: v1.PodSpec{ + HostNetwork: true, }, } @@ -123,7 +121,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) { podNamespace := "nsFoo" containerName := "containerFoo" output, err := kubelet.RunInContainer( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: v1.ObjectMeta{Name: podName, Namespace: podNamespace}}), "", containerName, []string{"ls"}) @@ -174,13 +172,13 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { kubelet.clusterDNS = net.ParseIP(clusterNS) pods := newTestPods(2) - pods[0].Spec.DNSPolicy = api.DNSClusterFirst - pods[1].Spec.DNSPolicy = api.DNSDefault + pods[0].Spec.DNSPolicy = v1.DNSClusterFirst + pods[1].Spec.DNSPolicy = v1.DNSDefault options := make([]*kubecontainer.RunContainerOptions, 2) for i, pod := range pods { var err error - options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + options[i], err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "") if err != nil { t.Fatalf("failed to generate container options: %v", err) } @@ -201,7 +199,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { kubelet.resolverConfig = "/etc/resolv.conf" for i, pod := range pods { var err error - options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + options[i], err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "") if err != nil { t.Fatalf("failed to generate container options: %v", err) } @@ -220,10 +218,10 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { } type testServiceLister struct { - services []*api.Service + services []*v1.Service } -func (ls testServiceLister) List(labels.Selector) ([]*api.Service, error) { +func (ls testServiceLister) List(labels.Selector) ([]*v1.Service, error) { return ls.services, nil } @@ -237,12 +235,12 @@ func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name } -func buildService(name, namespace, clusterIP, protocol string, port int) *api.Service { - 
return &api.Service{ - ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{{ - Protocol: api.Protocol(protocol), +func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Service { + return &v1.Service{ + ObjectMeta: v1.ObjectMeta{Name: name, Namespace: namespace}, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Protocol: v1.Protocol(protocol), Port: int32(port), }}, ClusterIP: clusterIP, @@ -251,8 +249,8 @@ func buildService(name, namespace, clusterIP, protocol string, port int) *api.Se } func TestMakeEnvironmentVariables(t *testing.T) { - services := []*api.Service{ - buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081), + services := []*v1.Service{ + buildService("kubernetes", v1.NamespaceDefault, "1.2.3.1", "TCP", 8081), buildService("test", "test1", "1.2.3.3", "TCP", 8083), buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084), buildService("test", "test2", "1.2.3.5", "TCP", 8085), @@ -267,7 +265,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { testCases := []struct { name string // the name of the test case ns string // the namespace to generate environment for - container *api.Container // the container to use + container *v1.Container // the container to use masterServiceNs string // the namespace to read master service info from nilLister bool // whether the lister should be nil expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars @@ -275,8 +273,8 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "api server = Y, kubelet = Y", ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ {Name: "FOO", Value: "BAR"}, {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, {Name: "TEST_SERVICE_PORT", Value: "8083"}, @@ -287,7 +285,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, }, }, - masterServiceNs: api.NamespaceDefault, + masterServiceNs: v1.NamespaceDefault, nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ -310,8 +308,8 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "api server = Y, kubelet = N", ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ {Name: "FOO", Value: "BAR"}, {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, {Name: "TEST_SERVICE_PORT", Value: "8083"}, @@ -322,7 +320,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, }, }, - masterServiceNs: api.NamespaceDefault, + masterServiceNs: v1.NamespaceDefault, nilLister: true, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ -338,12 +336,12 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "api server = N; kubelet = Y", ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ {Name: "FOO", Value: "BAZ"}, }, }, - masterServiceNs: api.NamespaceDefault, + masterServiceNs: v1.NamespaceDefault, nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ {Name: "FOO", Value: "BAZ"}, @@ -366,8 +364,8 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "master service in pod ns", ns: "test2", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ {Name: "FOO", Value: "ZAP"}, }, }, @@ -394,7 +392,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "pod in master service ns", 
ns: "kubernetes", - container: &api.Container{}, + container: &v1.Container{}, masterServiceNs: "kubernetes", nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ @@ -417,49 +415,49 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "downward api pod", ns: "downward-api", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ { Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "metadata.name", }, }, }, { Name: "POD_NAMESPACE", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "metadata.namespace", }, }, }, { Name: "POD_NODE_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "spec.nodeName", }, }, }, { Name: "POD_SERVICE_ACCOUNT_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "spec.serviceAccountName", }, }, }, { Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "status.podIP", }, }, @@ -479,17 +477,17 @@ func TestMakeEnvironmentVariables(t *testing.T) { { name: "env expansion", ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ + container: &v1.Container{ + Env: []v1.EnvVar{ { Name: "TEST_LITERAL", Value: "test-test-test", }, { Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), FieldPath: "metadata.name", }, }, @@ -619,12 +617,12 @@ func TestMakeEnvironmentVariables(t *testing.T) { kl.serviceLister = testServiceLister{services} } - testPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + testPod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: tc.ns, Name: "dapi-test-pod-name", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ ServiceAccountName: "special", NodeName: "node-name", }, @@ -640,58 +638,58 @@ func TestMakeEnvironmentVariables(t *testing.T) { } } -func waitingState(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func waitingState(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Waiting: &api.ContainerStateWaiting{}, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, }, } } -func waitingStateWithLastTermination(cName string) 
api.ContainerStatus { - return api.ContainerStatus{ +func waitingStateWithLastTermination(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Waiting: &api.ContainerStateWaiting{}, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, }, - LastTerminationState: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, }, }, } } -func runningState(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func runningState(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, } } -func stoppedState(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func stoppedState(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, }, } } -func succeededState(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func succeededState(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, }, }, } } -func failedState(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func failedState(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ ExitCode: -1, }, }, @@ -699,96 +697,96 @@ func failedState(cName string) api.ContainerStatus { } func TestPodPhaseWithRestartAlways(t *testing.T) { - desiredState := api.PodSpec{ + desiredState := v1.PodSpec{ NodeName: "machine", - Containers: []api.Container{ + Containers: []v1.Container{ {Name: "containerA"}, {Name: "containerB"}, }, - RestartPolicy: api.RestartPolicyAlways, + RestartPolicy: v1.RestartPolicyAlways, } tests := []struct { - pod *api.Pod - status api.PodPhase + pod *v1.Pod + status v1.PodPhase test string }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"}, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), runningState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "all running", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ stoppedState("containerA"), stoppedState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "all stopped with restart always", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), stoppedState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "mixed state #1 with restart always", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: 
api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #2 with restart always", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), waitingState("containerB"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #3 with restart always", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), waitingStateWithLastTermination("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "backoff crashloop container with restart always", }, } @@ -799,96 +797,96 @@ func TestPodPhaseWithRestartAlways(t *testing.T) { } func TestPodPhaseWithRestartNever(t *testing.T) { - desiredState := api.PodSpec{ + desiredState := v1.PodSpec{ NodeName: "machine", - Containers: []api.Container{ + Containers: []v1.Container{ {Name: "containerA"}, {Name: "containerB"}, }, - RestartPolicy: api.RestartPolicyNever, + RestartPolicy: v1.RestartPolicyNever, } tests := []struct { - pod *api.Pod - status api.PodPhase + pod *v1.Pod + status v1.PodPhase test string }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"}, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), runningState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "all running with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ succeededState("containerA"), succeededState("containerB"), }, }, }, - api.PodSucceeded, + v1.PodSucceeded, "all succeeded with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ failedState("containerA"), failedState("containerB"), }, }, }, - api.PodFailed, + v1.PodFailed, "all failed with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), succeededState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "mixed state #1 with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #2 with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), waitingState("containerB"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #3 with restart never", }, } @@ -899,109 +897,109 @@ func TestPodPhaseWithRestartNever(t 
*testing.T) { } func TestPodPhaseWithRestartOnFailure(t *testing.T) { - desiredState := api.PodSpec{ + desiredState := v1.PodSpec{ NodeName: "machine", - Containers: []api.Container{ + Containers: []v1.Container{ {Name: "containerA"}, {Name: "containerB"}, }, - RestartPolicy: api.RestartPolicyOnFailure, + RestartPolicy: v1.RestartPolicyOnFailure, } tests := []struct { - pod *api.Pod - status api.PodPhase + pod *v1.Pod + status v1.PodPhase test string }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"}, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), runningState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "all running with restart onfailure", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ succeededState("containerA"), succeededState("containerB"), }, }, }, - api.PodSucceeded, + v1.PodSucceeded, "all succeeded with restart onfailure", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ failedState("containerA"), failedState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "all failed with restart never", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), succeededState("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "mixed state #1 with restart onfailure", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #2 with restart onfailure", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), waitingState("containerB"), }, }, }, - api.PodPending, + v1.PodPending, "mixed state #3 with restart onfailure", }, { - &api.Pod{ + &v1.Pod{ Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ runningState("containerA"), waitingStateWithLastTermination("containerB"), }, }, }, - api.PodRunning, + v1.PodRunning, "backoff crashloop container with restart onfailure", }, } @@ -1218,17 +1216,17 @@ func TestPortForward(t *testing.T) { // Tests that identify the host port conflicts are detected correctly. 
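(An aside on the check the next test exercises: host-port conflict detection boils down to spotting a HostPort that more than one container asks for. Below is a minimal, illustrative sketch of that idea using the v1 types adopted in this diff; it is not the kubelet's actual hasHostPortConflicts helper, which also accounts for protocol and host IP.)

    package main

    import "k8s.io/kubernetes/pkg/api/v1"

    // hasDuplicateHostPorts is a simplified stand-in for the kubelet helper:
    // it reports whether any non-zero host port is requested more than once.
    func hasDuplicateHostPorts(pods []*v1.Pod) bool {
    	seen := map[int32]bool{}
    	for _, pod := range pods {
    		for _, c := range pod.Spec.Containers {
    			for _, p := range c.Ports {
    				if p.HostPort == 0 {
    					continue // zero means no host port was requested
    				}
    				if seen[p.HostPort] {
    					return true
    				}
    				seen[p.HostPort] = true
    			}
    		}
    	}
    	return false
    }

(TestGetHostPortConflicts below builds pods in exactly this shape and asserts both the conflict-free and conflicting cases against the real helper.)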
func TestGetHostPortConflicts(t *testing.T) { - pods := []*api.Pod{ - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}}, + pods := []*v1.Pod{ + {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}}, + {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 81}}}}}}, + {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 82}}}}}}, + {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 83}}}}}}, } // Pods should not cause any conflict. assert.False(t, hasHostPortConflicts(pods), "Should not have port conflicts") - expected := &api.Pod{ - Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}, + expected := &v1.Pod{ + Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 81}}}}}, } // The new pod should cause conflict and be reported. pods = append(pods, expected) @@ -1237,21 +1235,21 @@ func TestGetHostPortConflicts(t *testing.T) { func TestMakeDevices(t *testing.T) { testCases := []struct { - container *api.Container + container *v1.Container devices []kubecontainer.DeviceInfo test string }{ { test: "no device", - container: &api.Container{}, + container: &v1.Container{}, devices: nil, }, { test: "gpu", - container: &api.Container{ - Resources: api.ResourceRequirements{ - Limits: map[api.ResourceName]resource.Quantity{ - api.ResourceNvidiaGPU: resource.MustParse("1000"), + container: &v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceNvidiaGPU: resource.MustParse("1000"), }, }, }, @@ -1273,7 +1271,7 @@ func TestHasPrivilegedContainer(t *testing.T) { return &b } tests := map[string]struct { - securityContext *api.SecurityContext + securityContext *v1.SecurityContext expected bool }{ "nil sc": { @@ -1281,23 +1279,23 @@ func TestHasPrivilegedContainer(t *testing.T) { expected: false, }, "nil privleged": { - securityContext: &api.SecurityContext{}, + securityContext: &v1.SecurityContext{}, expected: false, }, "false privleged": { - securityContext: &api.SecurityContext{Privileged: newBoolPtr(false)}, + securityContext: &v1.SecurityContext{Privileged: newBoolPtr(false)}, expected: false, }, "true privleged": { - securityContext: &api.SecurityContext{Privileged: newBoolPtr(true)}, + securityContext: &v1.SecurityContext{Privileged: newBoolPtr(true)}, expected: true, }, } for k, v := range tests { - pod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {SecurityContext: v.securityContext}, }, }, @@ -1342,34 +1340,34 @@ func TestHasHostMountPVC(t *testing.T) { for k, v := range tests { testKubelet := newTestKubelet(t, false) - pod := &api.Pod{ - Spec: api.PodSpec{}, + pod := &v1.Pod{ + Spec: v1.PodSpec{}, } - volumeToReturn := &api.PersistentVolume{ - Spec: api.PersistentVolumeSpec{}, + volumeToReturn := &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, } if v.podHasPVC { - pod.Spec.Volumes = []api.Volume{ + pod.Spec.Volumes = []v1.Volume{ { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: 
&api.PersistentVolumeClaimVolumeSource{}, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{}, }, }, } if v.pvcIsHostPath { - volumeToReturn.Spec.PersistentVolumeSource = api.PersistentVolumeSource{ - HostPath: &api.HostPathVolumeSource{}, + volumeToReturn.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, } } } testKubelet.fakeKubeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { - return true, &api.PersistentVolumeClaim{ - Spec: api.PersistentVolumeClaimSpec{ + return true, &v1.PersistentVolumeClaim{ + Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "foo", }, }, v.pvcError @@ -1387,16 +1385,16 @@ func TestHasHostMountPVC(t *testing.T) { } func TestHasNonNamespacedCapability(t *testing.T) { - createPodWithCap := func(caps []api.Capability) *api.Pod { - pod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{{}}, + createPodWithCap := func(caps []v1.Capability) *v1.Pod { + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{}}, }, } if len(caps) > 0 { - pod.Spec.Containers[0].SecurityContext = &api.SecurityContext{ - Capabilities: &api.Capabilities{ + pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ Add: caps, }, } @@ -1404,19 +1402,19 @@ func TestHasNonNamespacedCapability(t *testing.T) { return pod } - nilCaps := createPodWithCap([]api.Capability{api.Capability("foo")}) + nilCaps := createPodWithCap([]v1.Capability{v1.Capability("foo")}) nilCaps.Spec.Containers[0].SecurityContext = nil tests := map[string]struct { - pod *api.Pod + pod *v1.Pod expected bool }{ "nil security contxt": {createPodWithCap(nil), false}, "nil caps": {nilCaps, false}, - "namespaced cap": {createPodWithCap([]api.Capability{api.Capability("foo")}), false}, - "non-namespaced cap MKNOD": {createPodWithCap([]api.Capability{api.Capability("MKNOD")}), true}, - "non-namespaced cap SYS_TIME": {createPodWithCap([]api.Capability{api.Capability("SYS_TIME")}), true}, - "non-namespaced cap SYS_MODULE": {createPodWithCap([]api.Capability{api.Capability("SYS_MODULE")}), true}, + "namespaced cap": {createPodWithCap([]v1.Capability{v1.Capability("foo")}), false}, + "non-namespaced cap MKNOD": {createPodWithCap([]v1.Capability{v1.Capability("MKNOD")}), true}, + "non-namespaced cap SYS_TIME": {createPodWithCap([]v1.Capability{v1.Capability("SYS_TIME")}), true}, + "non-namespaced cap SYS_MODULE": {createPodWithCap([]v1.Capability{v1.Capability("SYS_MODULE")}), true}, } for k, v := range tests { @@ -1428,12 +1426,12 @@ func TestHasNonNamespacedCapability(t *testing.T) { } func TestHasHostVolume(t *testing.T) { - pod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, }, }, }, @@ -1454,39 +1452,45 @@ func TestHasHostVolume(t *testing.T) { func TestHasHostNamespace(t *testing.T) { tests := map[string]struct { - psc *api.PodSecurityContext + ps v1.PodSpec expected bool }{ - "nil psc": {psc: nil, expected: false}, + "nil psc": { + ps: v1.PodSpec{}, + expected: false}, + "host pid true": { - psc: &api.PodSecurityContext{ - HostPID: true, + ps: v1.PodSpec{ + HostPID: true, + SecurityContext: &v1.PodSecurityContext{}, }, expected: true, }, "host ipc true": { - psc: &api.PodSecurityContext{ - HostIPC: true, + 
ps: v1.PodSpec{ + HostIPC: true, + SecurityContext: &v1.PodSecurityContext{}, }, expected: true, }, "host net true": { - psc: &api.PodSecurityContext{ - HostNetwork: true, + ps: v1.PodSpec{ + HostNetwork: true, + SecurityContext: &v1.PodSecurityContext{}, }, expected: true, }, "no host ns": { - psc: &api.PodSecurityContext{}, + ps: v1.PodSpec{ + SecurityContext: &v1.PodSecurityContext{}, + }, expected: false, }, } for k, v := range tests { - pod := &api.Pod{ - Spec: api.PodSpec{ - SecurityContext: v.psc, - }, + pod := &v1.Pod{ + Spec: v.ps, } actual := hasHostNamespace(pod) if actual != v.expected { diff --git a/pkg/kubelet/kubelet_resources.go b/pkg/kubelet/kubelet_resources.go index 53a1107ce37..ea2a3033b64 100644 --- a/pkg/kubelet/kubelet_resources.go +++ b/pkg/kubelet/kubelet_resources.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/fieldpath" ) @@ -30,7 +31,7 @@ import ( // the node allocatable. // TODO: if/when we have pod level resources, we need to update this function // to use those limits instead of node allocatable. -func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.Container) (*api.Pod, *api.Container, error) { +func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) { if pod == nil { return nil, nil, fmt.Errorf("invalid input, pod cannot be nil") } @@ -45,7 +46,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.C if err != nil { return nil, nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err) } - outputPod, ok := podCopy.(*api.Pod) + outputPod, ok := podCopy.(*v1.Pod) if !ok { return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object") } @@ -53,13 +54,13 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.C fieldpath.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) } - var outputContainer *api.Container + var outputContainer *v1.Container if container != nil { containerCopy, err := api.Scheme.DeepCopy(container) if err != nil { return nil, nil, fmt.Errorf("failed to perform a deep copy of container object: %v", err) } - outputContainer, ok = containerCopy.(*api.Container) + outputContainer, ok = containerCopy.(*v1.Container) if !ok { return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object") } diff --git a/pkg/kubelet/kubelet_resources_test.go b/pkg/kubelet/kubelet_resources_test.go index 751df62cd1c..3f379cf9991 100644 --- a/pkg/kubelet/kubelet_resources_test.go +++ b/pkg/kubelet/kubelet_resources_test.go @@ -23,8 +23,8 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -41,19 +41,19 @@ func TestPodResourceLimitsDefaulting(t *testing.T) { tk.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) tk.kubelet.reservation = kubetypes.Reservation{ - Kubernetes: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("4Gi"), + Kubernetes: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("3"), + v1.ResourceMemory: resource.MustParse("4Gi"), }, - System: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("2Gi"), + System: 
v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2Gi"), }, } cases := []struct { - pod *api.Pod - expected *api.Pod + pod *v1.Pod + expected *v1.Pod }{ { pod: getPod("0", "0"), @@ -76,26 +76,26 @@ func TestPodResourceLimitsDefaulting(t *testing.T) { for idx, tc := range cases { actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil) as.Nil(err, "failed to default pod limits: %v", err) - if !api.Semantic.DeepEqual(tc.expected, actual) { + if !v1.Semantic.DeepEqual(tc.expected, actual) { as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual) } } } -func getPod(cpuLimit, memoryLimit string) *api.Pod { - resources := api.ResourceRequirements{} +func getPod(cpuLimit, memoryLimit string) *v1.Pod { + resources := v1.ResourceRequirements{} if cpuLimit != "" || memoryLimit != "" { - resources.Limits = make(api.ResourceList) + resources.Limits = make(v1.ResourceList) } if cpuLimit != "" { - resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit) + resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit) } if memoryLimit != "" { - resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit) + resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit) } - return &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + return &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo", Resources: resources, diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 0c7ad75ad68..c3dd6fca318 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -28,12 +28,12 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/capabilities" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -141,7 +141,7 @@ func newTestKubeletWithImageList( t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err) } kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true }) - kubelet.masterServiceNamespace = api.NamespaceDefault + kubelet.masterServiceNamespace = v1.NamespaceDefault kubelet.serviceLister = testServiceLister{} kubelet.nodeLister = testNodeLister{} kubelet.nodeInfo = testNodeInfo{} @@ -149,7 +149,7 @@ func newTestKubeletWithImageList( if err := kubelet.setupDataDirs(); err != nil { t.Fatalf("can't initialize kubelet data dirs: %v", err) } - kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{} + kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{} mockCadvisor := &cadvisortest.Mock{} kubelet.cadvisor = mockCadvisor @@ -178,7 +178,7 @@ func newTestKubeletWithImageList( kubelet.livenessManager = proberesults.NewManager() kubelet.containerManager = cm.NewStubContainerManager() - fakeNodeRef := &api.ObjectReference{ + fakeNodeRef := &v1.ObjectReference{ Kind: "Node", Name: testKubeletHostname, UID: types.UID(testKubeletHostname), @@ -195,9 +195,9 @@ func newTestKubeletWithImageList( kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20) kubelet.resyncInterval = 10 * time.Second 
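(A note on the versioned resource types that the getPod helper and the reservation setup around this point now use: quantities are still built with resource.MustParse; only the list and key types move from the internal api package to v1. A minimal sketch mirroring that pattern, assuming the usual imports:)

    import (
    	"k8s.io/kubernetes/pkg/api/resource"
    	"k8s.io/kubernetes/pkg/api/v1"
    )

    // newLimitedContainer mirrors the getPod test helper above: it attaches CPU
    // and memory limits to a v1.Container via the versioned ResourceList type.
    func newLimitedContainer(cpu, memory string) v1.Container {
    	return v1.Container{
    		Name: "foo",
    		Resources: v1.ResourceRequirements{
    			Limits: v1.ResourceList{
    				v1.ResourceCPU:    resource.MustParse(cpu),
    				v1.ResourceMemory: resource.MustParse(memory),
    			},
    		},
    	}
    }

(newLimitedContainer is a hypothetical name used for illustration only; the tests themselves keep using getPod.)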
kubelet.reservation = kubetypes.Reservation{ - Kubernetes: api.ResourceList{ - api.ResourceCPU: resource.MustParse(testReservationCPU), - api.ResourceMemory: resource.MustParse(testReservationMemory), + Kubernetes: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(testReservationCPU), + v1.ResourceMemory: resource.MustParse(testReservationMemory), }, } kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock) @@ -209,7 +209,7 @@ func newTestKubeletWithImageList( // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency volumeStatsAggPeriod := time.Second * 10 kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime) - nodeRef := &api.ObjectReference{ + nodeRef := &v1.ObjectReference{ Kind: "Node", Name: string(kubelet.nodeName), UID: types.UID(kubelet.nodeName), @@ -253,16 +253,15 @@ func newTestKubeletWithImageList( return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug} } -func newTestPods(count int) []*api.Pod { - pods := make([]*api.Pod, count) +func newTestPods(count int) []*v1.Pod { + pods := make([]*v1.Pod, count) for i := 0; i < count; i++ { - pods[i] = &api.Pod{ - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + pods[i] = &v1.Pod{ + Spec: v1.PodSpec{ HostNetwork: true, - }, + }, - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: types.UID(10000 + i), Name: fmt.Sprintf("pod%d", i), }, @@ -326,9 +325,9 @@ func TestSyncPodsStartPod(t *testing.T) { testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) kubelet := testKubelet.kubelet fakeRuntime := testKubelet.fakeRuntime - pods := []*api.Pod{ - podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ - Containers: []api.Container{ + pods := []*v1.Pod{ + podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }), @@ -371,14 +370,14 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { } type testNodeLister struct { - nodes []api.Node + nodes []v1.Node } type testNodeInfo struct { - nodes []api.Node + nodes []v1.Node } -func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) { +func (ls testNodeInfo) GetNodeInfo(id string) (*v1.Node, error) { for _, node := range ls.nodes { if node.Name == id { return &node, nil @@ -387,8 +386,8 @@ func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) { return nil, fmt.Errorf("Node with name: %s does not exist", id) } -func (ls testNodeLister) List() (api.NodeList, error) { - return api.NodeList{ +func (ls testNodeLister) List() (v1.NodeList, error) { + return v1.NodeList{ Items: ls.nodes, }, nil } @@ -401,29 +400,29 @@ func TestHandlePortConflicts(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) - kl.nodeLister = testNodeLister{nodes: []api.Node{ + kl.nodeLister = testNodeLister{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, }} - kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + kl.nodeInfo = testNodeInfo{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: 
string(kl.nodeName)}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, }} - spec := api.PodSpec{NodeName: string(kl.nodeName), Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}} - pods := []*api.Pod{ + spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}} + pods := []*v1.Pod{ podWithUidNameNsSpec("123456789", "newpod", "foo", spec), podWithUidNameNsSpec("987654321", "oldpod", "foo", spec), } @@ -439,12 +438,12 @@ func TestHandlePortConflicts(t *testing.T) { // notfittingPod should be Failed status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) // fittingPod should be Pending status, found = kl.statusManager.GetPodStatus(fittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID) - require.Equal(t, api.PodPending, status.Phase) + require.Equal(t, v1.PodPending, status.Phase) } // Tests that we handle host name conflicts correctly by setting the failed status in status map. @@ -455,31 +454,31 @@ func TestHandleHostNameConflicts(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) - kl.nodeLister = testNodeLister{nodes: []api.Node{ + kl.nodeLister = testNodeLister{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: "127.0.0.1"}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, }} - kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + kl.nodeInfo = testNodeInfo{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: "127.0.0.1"}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, }} // default NodeName in test is 127.0.0.1 - pods := []*api.Pod{ - podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}), - podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}), + pods := []*v1.Pod{ + podWithUidNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}), + podWithUidNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}), } notfittingPod := pods[0] @@ -490,24 +489,24 @@ func TestHandleHostNameConflicts(t *testing.T) { // notfittingPod should be Failed status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) // fittingPod should be Pending status, found = 
kl.statusManager.GetPodStatus(fittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID) - require.Equal(t, api.PodPending, status.Phase) + require.Equal(t, v1.PodPending, status.Phase) } // Tests that we handle not matching labels selector correctly by setting the failed status in status map. func TestHandleNodeSelector(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kl := testKubelet.kubelet - nodes := []api.Node{ + nodes := []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, @@ -517,9 +516,9 @@ func TestHandleNodeSelector(t *testing.T) { testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) - pods := []*api.Pod{ - podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}), - podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}), + pods := []*v1.Pod{ + podWithUidNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}), + podWithUidNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}), } // The first pod should be rejected. notfittingPod := pods[0] @@ -530,24 +529,24 @@ func TestHandleNodeSelector(t *testing.T) { // notfittingPod should be Failed status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) // fittingPod should be Pending status, found = kl.statusManager.GetPodStatus(fittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID) - require.Equal(t, api.PodPending, status.Phase) + require.Equal(t, v1.PodPending, status.Phase) } // Tests that we handle exceeded resources correctly by setting the failed status in status map. 
func TestHandleMemExceeded(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kl := testKubelet.kubelet - nodes := []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI), + nodes := []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI), }}}, } kl.nodeLister = testNodeLister{nodes: nodes} @@ -556,13 +555,13 @@ func TestHandleMemExceeded(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) - spec := api.PodSpec{NodeName: string(kl.nodeName), - Containers: []api.Container{{Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + spec := v1.PodSpec{NodeName: string(kl.nodeName), + Containers: []v1.Container{{Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "memory": resource.MustParse("90"), }, }}}} - pods := []*api.Pod{ + pods := []*v1.Pod{ podWithUidNameNsSpec("123456789", "newpod", "foo", spec), podWithUidNameNsSpec("987654321", "oldpod", "foo", spec), } @@ -578,12 +577,12 @@ func TestHandleMemExceeded(t *testing.T) { // notfittingPod should be Failed status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) // fittingPod should be Pending status, found = kl.statusManager.GetPodStatus(fittingPod.UID) require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID) - require.Equal(t, api.PodPending, status.Phase) + require.Equal(t, v1.PodPending, status.Phase) } // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal. @@ -600,9 +599,9 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) { testKubelet.fakeCadvisor.On("VersionInfo").Return(versionInfo, nil) kl := testKubelet.kubelet - pods := []*api.Pod{ - {ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, - {ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, + pods := []*v1.Pod{ + {ObjectMeta: v1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}}, } podToTest := pods[1] // Run once to populate the status map. @@ -611,7 +610,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) { t.Fatalf("expected to have status cached for pod2") } // Sync with empty pods so that the entry in status map will be removed. 
- kl.podManager.SetPods([]*api.Pod{}) + kl.podManager.SetPods([]*v1.Pod{}) kl.HandlePodCleanups() if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found { t.Fatalf("expected to not have status cached for pod2") @@ -623,19 +622,19 @@ func TestValidateContainerLogStatus(t *testing.T) { kubelet := testKubelet.kubelet containerName := "x" testCases := []struct { - statuses []api.ContainerStatus + statuses []v1.ContainerStatus success bool // whether getting logs for the container should succeed. pSuccess bool // whether getting logs for the previous container should succeed. }{ { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, - LastTerminationState: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, }, }, }, @@ -643,11 +642,11 @@ func TestValidateContainerLogStatus(t *testing.T) { pSuccess: true, }, { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, }, }, @@ -655,11 +654,11 @@ func TestValidateContainerLogStatus(t *testing.T) { pSuccess: false, }, { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, }, }, }, @@ -667,11 +666,11 @@ func TestValidateContainerLogStatus(t *testing.T) { pSuccess: false, }, { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{ - Waiting: &api.ContainerStateWaiting{}, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, }, }, }, @@ -679,20 +678,20 @@ func TestValidateContainerLogStatus(t *testing.T) { pSuccess: false, }, { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}}, + State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}}, }, }, success: false, pSuccess: false, }, { - statuses: []api.ContainerStatus{ + statuses: []v1.ContainerStatus{ { Name: containerName, - State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}}, + State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}}, }, }, success: false, @@ -703,7 +702,7 @@ func TestValidateContainerLogStatus(t *testing.T) { for i, tc := range testCases { // Access the log of the most recent container previous := false - podStatus := &api.PodStatus{ContainerStatuses: tc.statuses} + podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses} _, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous) if !tc.success { assert.Error(t, err, fmt.Sprintf("[case %d] error", i)) @@ -756,7 +755,7 @@ func TestCreateMirrorPod(t *testing.T) { manager := testKubelet.fakeMirrorClient pod := podWithUidNameNs("12345678", "bar", "foo") pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file" - pods := []*api.Pod{pod} + pods := []*v1.Pod{pod} kl.podManager.SetPods(pods) err := kl.syncPod(syncPodOptions{ pod: pod, @@ -780,23 
+779,23 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) { kl := testKubelet.kubelet manager := testKubelet.fakeMirrorClient - pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{ - Containers: []api.Container{ + pod := podWithUidNameNsSpec("12345678", "foo", "ns", v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234", Image: "foo"}, }, }) pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file" // Mirror pod has an outdated spec. - mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{ - Containers: []api.Container{ + mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234", Image: "bar"}, }, }) mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api" mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror" - pods := []*api.Pod{pod, mirrorPod} + pods := []*v1.Pod{pod, mirrorPod} kl.podManager.SetPods(pods) err := kl.syncPod(syncPodOptions{ pod: pod, @@ -821,9 +820,9 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { kl := testKubelet.kubelet manager := testKubelet.fakeMirrorClient - orphanPods := []*api.Pod{ + orphanPods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "pod1", Namespace: "ns", @@ -834,7 +833,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "12345679", Name: "pod2", Namespace: "ns", @@ -862,9 +861,9 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { func TestGetContainerInfoForMirrorPods(t *testing.T) { // pods contain one static and one mirror pod with the same name but // different UIDs. - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "1234", Name: "qux", Namespace: "ns", @@ -872,14 +871,14 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) { kubetypes.ConfigSourceAnnotationKey: "file", }, }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "5678", Name: "qux", Namespace: "ns", @@ -888,8 +887,8 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) { kubetypes.ConfigMirrorAnnotationKey: "mirror", }, }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, }, @@ -948,17 +947,16 @@ func TestHostNetworkAllowed(t *testing.T) { HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource}, }, }) - pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ - Containers: []api.Container{ + pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, - SecurityContext: &api.PodSecurityContext{ HostNetwork: true, - }, + }) pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) err := kubelet.syncPod(syncPodOptions{ pod: pod, podStatus: &kubecontainer.PodStatus{}, @@ -982,13 +980,12 @@ func TestHostNetworkDisallowed(t *testing.T) { HostNetworkSources: []string{}, }, }) - pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ - Containers: []api.Container{ + pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, - SecurityContext: &api.PodSecurityContext{ HostNetwork: true, - }, + }) 
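(The recurring edit in these host-network tests is more than a package rename: in the versioned v1 API the host-namespace flags sit directly on the pod spec, so the PodSecurityContext wrapper that the internal types required is dropped. A minimal sketch of the v1 shape, matching the pods built throughout this file:)

    pod := &v1.Pod{
    	Spec: v1.PodSpec{
    		HostNetwork: true, // in v1 this lives on PodSpec, not PodSecurityContext
    		Containers: []v1.Container{
    			{Name: "foo"},
    		},
    	},
    }

(HostPID and HostIPC follow the same pattern, which is why TestHasHostNamespace above now takes a full v1.PodSpec instead of a *PodSecurityContext.)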
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource @@ -1014,13 +1011,13 @@ func TestPrivilegeContainerAllowed(t *testing.T) { AllowPrivileged: true, }) privileged := true - pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ - Containers: []api.Container{ - {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}}, + pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{ + Containers: []v1.Container{ + {Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}}, }, }) - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) err := kubelet.syncPod(syncPodOptions{ pod: pod, podStatus: &kubecontainer.PodStatus{}, @@ -1041,9 +1038,9 @@ func TestPrivilegedContainerDisallowed(t *testing.T) { AllowPrivileged: false, }) privileged := true - pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ - Containers: []api.Container{ - {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}}, + pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{ + Containers: []v1.Container{ + {Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}}, }, }) @@ -1070,16 +1067,15 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) { }, }) - pod := podWithUidNameNsSpec("12345678", "hostnetwork", "new", api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + pod := podWithUidNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{ HostNetwork: false, - }, - Containers: []api.Container{ + + Containers: []v1.Container{ {Name: "foo"}, }, }) - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) err := kubelet.syncPod(syncPodOptions{ pod: pod, podStatus: &kubecontainer.PodStatus{}, @@ -1088,7 +1084,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) { assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error") pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource - pod.Spec.SecurityContext.HostNetwork = true + pod.Spec.HostNetwork = true err = kubelet.syncPod(syncPodOptions{ pod: pod, podStatus: &kubecontainer.PodStatus{}, @@ -1101,20 +1097,20 @@ func TestFilterOutTerminatedPods(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet pods := newTestPods(5) - pods[0].Status.Phase = api.PodFailed - pods[1].Status.Phase = api.PodSucceeded - pods[2].Status.Phase = api.PodRunning - pods[3].Status.Phase = api.PodPending + pods[0].Status.Phase = v1.PodFailed + pods[1].Status.Phase = v1.PodSucceeded + pods[2].Status.Phase = v1.PodRunning + pods[3].Status.Phase = v1.PodPending - expected := []*api.Pod{pods[2], pods[3], pods[4]} + expected := []*v1.Pod{pods[2], pods[3], pods[4]} kubelet.podManager.SetPods(pods) actual := kubelet.filterOutTerminatedPods(pods) assert.Equal(t, expected, actual) } func TestMakePortMappings(t *testing.T) { - port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort { - return api.ContainerPort{ + port := func(name string, protocol v1.Protocol, containerPort, hostPort int32, ip string) v1.ContainerPort { + return v1.ContainerPort{ Name: name, Protocol: protocol, ContainerPort: containerPort, @@ -1122,7 +1118,7 @@ func TestMakePortMappings(t *testing.T) { HostIP: ip, } } - portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping { + portMapping := 
func(name string, protocol v1.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping { return kubecontainer.PortMapping{ Name: name, Protocol: protocol, @@ -1133,26 +1129,26 @@ func TestMakePortMappings(t *testing.T) { } tests := []struct { - container *api.Container + container *v1.Container expectedPortMappings []kubecontainer.PortMapping }{ { - &api.Container{ + &v1.Container{ Name: "fooContainer", - Ports: []api.ContainerPort{ - port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"), - port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"), - port("foo", api.ProtocolUDP, 555, 5555, ""), + Ports: []v1.ContainerPort{ + port("", v1.ProtocolTCP, 80, 8080, "127.0.0.1"), + port("", v1.ProtocolTCP, 443, 4343, "192.168.0.1"), + port("foo", v1.ProtocolUDP, 555, 5555, ""), // Duplicated, should be ignored. - port("foo", api.ProtocolUDP, 888, 8888, ""), + port("foo", v1.ProtocolUDP, 888, 8888, ""), // Duplicated, should be ignored. - port("", api.ProtocolTCP, 80, 8888, ""), + port("", v1.ProtocolTCP, 80, 8888, ""), }, }, []kubecontainer.PortMapping{ - portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"), - portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"), - portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""), + portMapping("fooContainer-TCP:80", v1.ProtocolTCP, 80, 8080, "127.0.0.1"), + portMapping("fooContainer-TCP:443", v1.ProtocolTCP, 443, 4343, "192.168.0.1"), + portMapping("fooContainer-foo", v1.ProtocolUDP, 555, 5555, ""), }, }, } @@ -1173,20 +1169,20 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) { startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) exceededActiveDeadlineSeconds := int64(30) - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "bar", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds, }, - Status: api.PodStatus{ + Status: v1.PodStatus{ StartTime: &startTime, }, }, @@ -1207,7 +1203,7 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) { kubelet.HandlePodUpdates(pods) status, found := kubelet.statusManager.GetPodStatus(pods[0].UID) assert.True(t, found, "expected to found status for pod %q", pods[0].UID) - assert.Equal(t, api.PodFailed, status.Phase) + assert.Equal(t, v1.PodFailed, status.Phase) } func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { @@ -1225,20 +1221,20 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) exceededActiveDeadlineSeconds := int64(300) - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "bar", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "foo"}, }, ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds, }, - Status: api.PodStatus{ + Status: v1.PodStatus{ StartTime: &startTime, }, }, @@ -1259,12 +1255,12 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { kubelet.HandlePodUpdates(pods) status, found := kubelet.statusManager.GetPodStatus(pods[0].UID) assert.True(t, found, "expected to found status for pod %q", pods[0].UID) - assert.NotEqual(t, api.PodFailed, status.Phase) + assert.NotEqual(t, v1.PodFailed, 
status.Phase) } -func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: uid, Name: name, Namespace: namespace, @@ -1273,7 +1269,7 @@ func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod { } } -func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod { +func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod { pod := podWithUidNameNs(uid, name, namespace) pod.Spec = spec return pod @@ -1287,7 +1283,7 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) kl := testKubelet.kubelet - pods := []*api.Pod{ + pods := []*v1.Pod{ podWithUidNameNs("12345678", "pod1", "ns"), podWithUidNameNs("12345679", "pod2", "ns"), } @@ -1300,13 +1296,13 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) { } // Pod 1 has been deleted and no longer exists. - kl.podManager.SetPods([]*api.Pod{pods[0]}) + kl.podManager.SetPods([]*v1.Pod{pods[0]}) kl.HandlePodCleanups() assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0") assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1") } -func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) { +func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) { kl := testKubelet.kubelet kl.podManager.SetPods(pods) @@ -1326,7 +1322,7 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) kl := testKubelet.kubelet - pods := []*api.Pod{ + pods := []*v1.Pod{ podWithUidNameNs("12345678", "pod1", "ns"), podWithUidNameNs("12345679", "pod2", "ns"), podWithUidNameNs("12345680", "pod3", "ns"), @@ -1335,8 +1331,8 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) { syncAndVerifyPodDir(t, testKubelet, pods, pods, true) // Pod 1 failed, and pod 2 succeeded. None of the pod directories should be // deleted. - kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed}) - kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded}) + kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed}) + kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded}) syncAndVerifyPodDir(t, testKubelet, pods, pods, true) } @@ -1356,20 +1352,20 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) { // Sync once to create pod directory; confirm that the pod directory has // already been created. - pods := []*api.Pod{apiPod} - syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true) + pods := []*v1.Pod{apiPod} + syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true) // Pretend the pod is deleted from apiserver, but is still active on the node. // The pod directory should not be removed. 
- pods = []*api.Pod{} + pods = []*v1.Pod{} testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{runningPod, ""}} - syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true) + syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true) // The pod is deleted and also not active on the node. The pod directory // should be removed. - pods = []*api.Pod{} + pods = []*v1.Pod{} testKubelet.fakeRuntime.PodList = []*containertest.FakePod{} - syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false) + syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false) } func TestGetPodsToSync(t *testing.T) { @@ -1395,7 +1391,7 @@ func TestGetPodsToSync(t *testing.T) { clock.Step(1 * time.Minute) - expected := []*api.Pod{pods[2], pods[3], pods[0]} + expected := []*v1.Pod{pods[2], pods[3], pods[0]} podsToSync := kubelet.getPodsToSync() sort.Sort(podsByUID(expected)) sort.Sort(podsByUID(podsToSync)) @@ -1412,7 +1408,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) { numContainers := 10 expectedOrder := []string{} cStatuses := []*kubecontainer.ContainerStatus{} - specContainerList := []api.Container{} + specContainerList := []v1.Container{} for i := 0; i < numContainers; i++ { id := fmt.Sprintf("%v", i) containerName := fmt.Sprintf("%vcontainer", id) @@ -1427,10 +1423,10 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) { } else { cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...) } - specContainerList = append(specContainerList, api.Container{Name: containerName}) + specContainerList = append(specContainerList, v1.Container{Name: containerName}) } pod := podWithUidNameNs("uid1", "foo", "test") - pod.Spec = api.PodSpec{ + pod.Spec = v1.PodSpec{ Containers: specContainerList, } @@ -1450,7 +1446,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) { } } -func verifyContainerStatuses(t *testing.T, statuses []api.ContainerStatus, state, lastTerminationState map[string]api.ContainerState, message string) { +func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, state, lastTerminationState map[string]v1.ContainerState, message string) { for _, s := range statuses { assert.Equal(t, s.State, state[s.Name], "%s: state", message) assert.Equal(t, s.LastTerminationState, lastTerminationState[s.Name], "%s: last terminated state", message) @@ -1472,7 +1468,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) kubelet := testKubelet.kubelet pod := podWithUidNameNs("12345678", "foo", "new") - pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure} + pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure} podStatus := &kubecontainer.PodStatus{ ID: pod.UID, @@ -1480,48 +1476,48 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { Namespace: pod.Namespace, } tests := []struct { - containers []api.Container + containers []v1.Container statuses []*kubecontainer.ContainerStatus reasons map[string]error - oldStatuses []api.ContainerStatus - expectedState map[string]api.ContainerState + oldStatuses []v1.ContainerStatus + expectedState map[string]v1.ContainerState // Only set expectedInitState when it is different from expectedState - expectedInitState map[string]api.ContainerState - expectedLastTerminationState map[string]api.ContainerState + expectedInitState map[string]v1.ContainerState + expectedLastTerminationState map[string]v1.ContainerState }{ // For container with no 
historical record, State should be Waiting, LastTerminationState should be retrieved from // old status from apiserver. { - containers: []api.Container{{Name: "without-old-record"}, {Name: "with-old-record"}}, + containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}}, statuses: []*kubecontainer.ContainerStatus{}, reasons: map[string]error{}, - oldStatuses: []api.ContainerStatus{{ + oldStatuses: []v1.ContainerStatus{{ Name: "with-old-record", - LastTerminationState: api.ContainerState{Terminated: &api.ContainerStateTerminated{}}, + LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}}, }}, - expectedState: map[string]api.ContainerState{ - "without-old-record": {Waiting: &api.ContainerStateWaiting{ + expectedState: map[string]v1.ContainerState{ + "without-old-record": {Waiting: &v1.ContainerStateWaiting{ Reason: startWaitingReason, }}, - "with-old-record": {Waiting: &api.ContainerStateWaiting{ + "with-old-record": {Waiting: &v1.ContainerStateWaiting{ Reason: startWaitingReason, }}, }, - expectedInitState: map[string]api.ContainerState{ - "without-old-record": {Waiting: &api.ContainerStateWaiting{ + expectedInitState: map[string]v1.ContainerState{ + "without-old-record": {Waiting: &v1.ContainerStateWaiting{ Reason: initWaitingReason, }}, - "with-old-record": {Waiting: &api.ContainerStateWaiting{ + "with-old-record": {Waiting: &v1.ContainerStateWaiting{ Reason: initWaitingReason, }}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "with-old-record": {Terminated: &api.ContainerStateTerminated{}}, + expectedLastTerminationState: map[string]v1.ContainerState{ + "with-old-record": {Terminated: &v1.ContainerStateTerminated{}}, }, }, // For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status. { - containers: []api.Container{{Name: "running"}}, + containers: []v1.Container{{Name: "running"}}, statuses: []*kubecontainer.ContainerStatus{ { Name: "running", @@ -1535,14 +1531,14 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { }, }, reasons: map[string]error{}, - oldStatuses: []api.ContainerStatus{}, - expectedState: map[string]api.ContainerState{ - "running": {Running: &api.ContainerStateRunning{ + oldStatuses: []v1.ContainerStatus{}, + expectedState: map[string]v1.ContainerState{ + "running": {Running: &v1.ContainerStateRunning{ StartedAt: unversioned.NewTime(testTimestamp), }}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "running": {Terminated: &api.ContainerStateTerminated{ + expectedLastTerminationState: map[string]v1.ContainerState{ + "running": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, @@ -1557,7 +1553,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { // recent start error or not, State should be Terminated, LastTerminationState should be retrieved from second latest // terminated status. 
{ - containers: []api.Container{{Name: "without-reason"}, {Name: "with-reason"}}, + containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}}, statuses: []*kubecontainer.ContainerStatus{ { Name: "without-reason", @@ -1591,28 +1587,28 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { }, }, reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason}, - oldStatuses: []api.ContainerStatus{}, - expectedState: map[string]api.ContainerState{ - "without-reason": {Terminated: &api.ContainerStateTerminated{ + oldStatuses: []v1.ContainerStatus{}, + expectedState: map[string]v1.ContainerState{ + "without-reason": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, - "with-reason": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, - "succeed": {Terminated: &api.ContainerStateTerminated{ + "with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, ContainerID: emptyContainerID, }}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "without-reason": {Terminated: &api.ContainerStateTerminated{ + expectedLastTerminationState: map[string]v1.ContainerState{ + "without-reason": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 3, ContainerID: emptyContainerID, }}, - "with-reason": {Terminated: &api.ContainerStateTerminated{ + "with-reason": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 2, ContainerID: emptyContainerID, }}, - "succeed": {Terminated: &api.ContainerStateTerminated{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 5, ContainerID: emptyContainerID, }}, @@ -1661,7 +1657,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) { testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) kubelet := testKubelet.kubelet pod := podWithUidNameNs("12345678", "foo", "new") - containers := []api.Container{{Name: "succeed"}, {Name: "failed"}} + containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}} podStatus := &kubecontainer.PodStatus{ ID: pod.UID, Name: pod.Name, @@ -1692,88 +1688,88 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) { kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "") kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "") for c, test := range []struct { - restartPolicy api.RestartPolicy - expectedState map[string]api.ContainerState - expectedLastTerminationState map[string]api.ContainerState + restartPolicy v1.RestartPolicy + expectedState map[string]v1.ContainerState + expectedLastTerminationState map[string]v1.ContainerState // Only set expectedInitState when it is different from expectedState - expectedInitState map[string]api.ContainerState + expectedInitState map[string]v1.ContainerState // Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState - expectedInitLastTerminationState map[string]api.ContainerState + expectedInitLastTerminationState map[string]v1.ContainerState }{ { - restartPolicy: api.RestartPolicyNever, - expectedState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + restartPolicy: v1.RestartPolicyNever, + expectedState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, ContainerID: emptyContainerID, }}, - "failed": {Terminated: &api.ContainerStateTerminated{ + "failed": {Terminated: 
&v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + expectedLastTerminationState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 2, ContainerID: emptyContainerID, }}, - "failed": {Terminated: &api.ContainerStateTerminated{ + "failed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 3, ContainerID: emptyContainerID, }}, }, }, { - restartPolicy: api.RestartPolicyOnFailure, - expectedState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + restartPolicy: v1.RestartPolicyOnFailure, + expectedState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, ContainerID: emptyContainerID, }}, - "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + expectedLastTerminationState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 2, ContainerID: emptyContainerID, }}, - "failed": {Terminated: &api.ContainerStateTerminated{ + "failed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, }, }, { - restartPolicy: api.RestartPolicyAlways, - expectedState: map[string]api.ContainerState{ - "succeed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, - "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + restartPolicy: v1.RestartPolicyAlways, + expectedState: map[string]v1.ContainerState{ + "succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}}, }, - expectedLastTerminationState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + expectedLastTerminationState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, ContainerID: emptyContainerID, }}, - "failed": {Terminated: &api.ContainerStateTerminated{ + "failed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, }, // If the init container is terminated with exit code 0, it won't be restarted even when the // restart policy is RestartAlways. 
- expectedInitState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + expectedInitState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 0, ContainerID: emptyContainerID, }}, - "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}}, }, - expectedInitLastTerminationState: map[string]api.ContainerState{ - "succeed": {Terminated: &api.ContainerStateTerminated{ + expectedInitLastTerminationState: map[string]v1.ContainerState{ + "succeed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 2, ContainerID: emptyContainerID, }}, - "failed": {Terminated: &api.ContainerStateTerminated{ + "failed": {Terminated: &v1.ContainerStateTerminated{ ExitCode: 1, ContainerID: emptyContainerID, }}, @@ -1805,7 +1801,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) { // testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing. type testPodAdmitHandler struct { // list of pods to reject. - podsToReject []*api.Pod + podsToReject []*v1.Pod } // Admit rejects all pods in the podsToReject list with a matching UID. @@ -1822,22 +1818,22 @@ func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecyc func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kl := testKubelet.kubelet - kl.nodeLister = testNodeLister{nodes: []api.Node{ + kl.nodeLister = testNodeLister{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, }} - kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + kl.nodeInfo = testNodeInfo{nodes: []v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: string(kl.nodeName)}, - Status: api.NodeStatus{ - Allocatable: api.ResourceList{ - api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + ObjectMeta: v1.ObjectMeta{Name: string(kl.nodeName)}, + Status: v1.NodeStatus{ + Allocatable: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), }, }, }, @@ -1846,16 +1842,16 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "123456789", Name: "podA", Namespace: "foo", }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "987654321", Name: "podB", Namespace: "foo", @@ -1864,7 +1860,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { } podToReject := pods[0] podToAdmit := pods[1] - podsToReject := []*api.Pod{podToReject} + podsToReject := []*v1.Pod{podToReject} kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject}) @@ -1873,22 +1869,22 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { // podToReject should be Failed status, found := kl.statusManager.GetPodStatus(podToReject.UID) require.True(t, found, "Status of pod %q is not found in the status map", 
podToAdmit.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) // podToAdmit should be Pending status, found = kl.statusManager.GetPodStatus(podToAdmit.UID) require.True(t, found, "Status of pod %q is not found in the status map", podToAdmit.UID) - require.Equal(t, api.PodPending, status.Phase) + require.Equal(t, v1.PodPending, status.Phase) } // testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing. type testPodSyncLoopHandler struct { // list of pods to sync - podsToSync []*api.Pod + podsToSync []*v1.Pod } // ShouldSync evaluates if the pod should be synced from the kubelet. -func (a *testPodSyncLoopHandler) ShouldSync(pod *api.Pod) bool { +func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool { for _, podToSync := range a.podsToSync { if podToSync.UID == pod.UID { return true @@ -1902,7 +1898,7 @@ func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet pods := newTestPods(5) - expected := []*api.Pod{pods[0]} + expected := []*v1.Pod{pods[0]} kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected}) kubelet.podManager.SetPods(pods) @@ -1915,7 +1911,7 @@ func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) { // testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing. type testPodSyncHandler struct { // list of pods to evict. - podsToEvict []*api.Pod + podsToEvict []*v1.Pod // the reason for the eviction reason string // the message for the eviction @@ -1923,7 +1919,7 @@ type testPodSyncHandler struct { } // ShouldEvict evaluates if the pod should be evicted from the kubelet. -func (a *testPodSyncHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse { +func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse { for _, podToEvict := range a.podsToEvict { if podToEvict.UID == pod.UID { return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message} @@ -1937,7 +1933,7 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet pod := newTestPods(1)[0] - podsToEvict := []*api.Pod{pod} + podsToEvict := []*v1.Pod{pod} kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"}) status := &kubecontainer.PodStatus{ ID: pod.UID, @@ -1945,7 +1941,7 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) { Namespace: pod.Namespace, } apiStatus := kubelet.generateAPIPodStatus(pod, status) - require.Equal(t, api.PodFailed, apiStatus.Phase) + require.Equal(t, v1.PodFailed, apiStatus.Phase) require.Equal(t, "Evicted", apiStatus.Reason) require.Equal(t, "because", apiStatus.Message) } @@ -1953,14 +1949,14 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) { func TestSyncPodKillPod(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kl := testKubelet.kubelet - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "bar", Namespace: "foo", }, } - pods := []*api.Pod{pod} + pods := []*v1.Pod{pod} kl.podManager.SetPods(pods) gracePeriodOverride := int64(0) err := kl.syncPod(syncPodOptions{ @@ -1968,9 +1964,9 @@ func TestSyncPodKillPod(t *testing.T) { podStatus: &kubecontainer.PodStatus{}, updateType: kubetypes.SyncPodKill, killPodOptions: 
&KillPodOptions{ - PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { - return api.PodStatus{ - Phase: api.PodFailed, + PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { + return v1.PodStatus{ + Phase: v1.PodFailed, Reason: "reason", Message: "message", } @@ -1982,12 +1978,12 @@ func TestSyncPodKillPod(t *testing.T) { // Check pod status stored in the status map. status, found := kl.statusManager.GetPodStatus(pod.UID) require.True(t, found, "Status of pod %q is not found in the status map", pod.UID) - require.Equal(t, api.PodFailed, status.Phase) + require.Equal(t, v1.PodFailed, status.Phase) } func waitForVolumeUnmount( volumeManager kubeletvolume.VolumeManager, - pod *api.Pod) error { + pod *v1.Pod) error { var podVolumes kubecontainer.VolumeMap err := retryWithExponentialBackOff( time.Duration(50*time.Millisecond), @@ -2013,9 +2009,9 @@ func waitForVolumeUnmount( } func waitForVolumeDetach( - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, volumeManager kubeletvolume.VolumeManager) error { - attachedVolumes := []api.UniqueVolumeName{} + attachedVolumes := []v1.UniqueVolumeName{} err := retryWithExponentialBackOff( time.Duration(50*time.Millisecond), func() (bool, error) { @@ -2044,7 +2040,7 @@ func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.Conditio } func simulateVolumeInUseUpdate( - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, stopCh <-chan struct{}, volumeManager kubeletvolume.VolumeManager) { ticker := time.NewTicker(100 * time.Millisecond) @@ -2053,7 +2049,7 @@ func simulateVolumeInUseUpdate( select { case <-ticker.C: volumeManager.MarkVolumesAsReportedInUse( - []api.UniqueVolumeName{volumeName}) + []v1.UniqueVolumeName{volumeName}) case <-stopCh: return } @@ -2067,7 +2063,7 @@ func runVolumeManager(kubelet *Kubelet) chan struct{} { } // Sort pods by UID. -type podsByUID []*api.Pod +type podsByUID []*v1.Pod func (p podsByUID) Len() int { return len(p) } func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/pkg/kubelet/kubelet_volumes.go b/pkg/kubelet/kubelet_volumes.go index c7ae64050a6..f9fabcdb9e3 100644 --- a/pkg/kubelet/kubelet_volumes.go +++ b/pkg/kubelet/kubelet_volumes.go @@ -21,7 +21,7 @@ import ( "os" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" utilerrors "k8s.io/kubernetes/pkg/util/errors" @@ -65,7 +65,7 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool { // newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod // and volume options and then creates a Mounter. // Returns a valid Unmounter or an error. -func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { +func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec) if err != nil { return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err) @@ -81,7 +81,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, // cleanupOrphanedPodDirs removes the volumes of pods that should not be // running and that have no containers running. 
func (kl *Kubelet) cleanupOrphanedPodDirs( - pods []*api.Pod, runningPods []*kubecontainer.Pod) error { + pods []*v1.Pod, runningPods []*kubecontainer.Pod) error { allPods := sets.NewString() for _, pod := range pods { allPods.Insert(string(pod.UID)) diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go index ec69020f3b6..fb2412753e2 100644 --- a/pkg/kubelet/kubelet_volumes_test.go +++ b/pkg/kubelet/kubelet_volumes_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/volume" @@ -33,18 +33,18 @@ func TestPodVolumesExist(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -53,16 +53,16 @@ func TestPodVolumesExist(t *testing.T) { }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", UID: "pod2uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol2", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device2", }, }, @@ -71,16 +71,16 @@ func TestPodVolumesExist(t *testing.T) { }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3", UID: "pod3uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol3", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device3", }, }, @@ -117,12 +117,12 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ + pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, @@ -135,7 +135,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) { close(stopCh) }() - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) err := kubelet.volumeManager.WaitForAttachAndMount(pod) assert.NoError(t, err) @@ -162,12 +162,12 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ + pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{ + Volumes: []v1.Volume{ { Name: 
"vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, @@ -181,7 +181,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { }() // Add pod - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) // Verify volumes attached err := kubelet.volumeManager.WaitForAttachAndMount(pod) @@ -207,7 +207,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) // Remove pod - kubelet.podManager.SetPods([]*api.Pod{}) + kubelet.podManager.SetPods([]*v1.Pod{}) assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) @@ -222,7 +222,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") assert.NoError(t, volumetest.VerifyDetachCallCount( 1 /* expectedDetachCallCount */, testKubelet.volumePlugin)) @@ -234,28 +234,28 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { kubeClient := testKubelet.fakeKubeClient kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ + return true, &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Status: v1.NodeStatus{ + VolumesAttached: []v1.AttachedVolume{ { Name: "fake/vol1", DevicePath: "fake/path", }, }}, - Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + Spec: v1.NodeSpec{ExternalID: testKubeletHostname}, }, nil }) kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("no reaction implemented for %s", action) }) - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ + pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, @@ -268,11 +268,11 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { close(stopCh) }() - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( - api.UniqueVolumeName("fake/vol1"), + v1.UniqueVolumeName("fake/vol1"), stopCh, kubelet.volumeManager) @@ -302,28 +302,28 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { kubeClient := testKubelet.fakeKubeClient kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ + return true, &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, + Status: v1.NodeStatus{ + 
VolumesAttached: []v1.AttachedVolume{ { Name: "fake/vol1", DevicePath: "fake/path", }, }}, - Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + Spec: v1.NodeSpec{ExternalID: testKubeletHostname}, }, nil }) kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("no reaction implemented for %s", action) }) - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ + pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, @@ -337,11 +337,11 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { }() // Add pod - kubelet.podManager.SetPods([]*api.Pod{pod}) + kubelet.podManager.SetPods([]*v1.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( - api.UniqueVolumeName("fake/vol1"), + v1.UniqueVolumeName("fake/vol1"), stopCh, kubelet.volumeManager) @@ -367,7 +367,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) // Remove pod - kubelet.podManager.SetPods([]*api.Pod{}) + kubelet.podManager.SetPods([]*v1.Pod{}) assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) @@ -382,7 +382,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)) } diff --git a/pkg/kubelet/kuberuntime/doc.go b/pkg/kubelet/kuberuntime/doc.go index c1284ee28c0..61a022910a0 100644 --- a/pkg/kubelet/kuberuntime/doc.go +++ b/pkg/kubelet/kuberuntime/doc.go @@ -15,5 +15,5 @@ limitations under the License. */ // Package kuberuntime contains an implementation of kubecontainer.Runtime using -// the interface in pkg/kubelet/api. +// the interface in pkg/kubelet/v1. package kuberuntime diff --git a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 8d257b4c334..4d67bd6962e 100644 --- a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -22,7 +22,7 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" internalApi "k8s.io/kubernetes/pkg/kubelet/api" @@ -49,7 +49,7 @@ func (f *fakeHTTP) Get(url string) (*http.Response, error) { // fakeRuntimeHelper implements kubecontainer.RuntimeHelper interfaces for testing purposes. 
type fakeRuntimeHelper struct{} -func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { +func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { var opts kubecontainer.RunContainerOptions if len(container.TerminationMessagePath) != 0 { testPodContainerDir, err := ioutil.TempDir("", "fooPodContainerDir") @@ -61,12 +61,12 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container return &opts, nil } -func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { +func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) { return nil, nil, nil } // This is not used by docker runtime. -func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { +func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { return "", "", nil } @@ -74,19 +74,19 @@ func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string { return "" } -func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 { +func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { return nil } type fakePodGetter struct { - pods map[types.UID]*api.Pod + pods map[types.UID]*v1.Pod } func newFakePodGetter() *fakePodGetter { - return &fakePodGetter{make(map[types.UID]*api.Pod)} + return &fakePodGetter{make(map[types.UID]*v1.Pod)} } -func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) { +func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) { pod, found := f.pods[uid] return pod, found } diff --git a/pkg/kubelet/kuberuntime/helpers.go b/pkg/kubelet/kuberuntime/helpers.go index 91088e56136..d4d2ed92fa4 100644 --- a/pkg/kubelet/kuberuntime/helpers.go +++ b/pkg/kubelet/kuberuntime/helpers.go @@ -22,7 +22,7 @@ import ( "strconv" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" @@ -85,12 +85,12 @@ func toKubeContainerState(state runtimeApi.ContainerState) kubecontainer.Contain return kubecontainer.ContainerStateUnknown } -// toRuntimeProtocol converts api.Protocol to runtimeApi.Protocol. -func toRuntimeProtocol(protocol api.Protocol) runtimeApi.Protocol { +// toRuntimeProtocol converts v1.Protocol to runtimeApi.Protocol. +func toRuntimeProtocol(protocol v1.Protocol) runtimeApi.Protocol { switch protocol { - case api.ProtocolTCP: + case v1.ProtocolTCP: return runtimeApi.Protocol_TCP - case api.ProtocolUDP: + case v1.ProtocolUDP: return runtimeApi.Protocol_UDP } @@ -131,7 +131,7 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeApi.PodSand } // getContainerSpec gets the container spec by containerName. -func getContainerSpec(pod *api.Pod, containerName string) *api.Container { +func getContainerSpec(pod *v1.Pod, containerName string) *v1.Container { for i, c := range pod.Spec.Containers { if containerName == c.Name { return &pod.Spec.Containers[i] @@ -217,7 +217,7 @@ func milliCPUToQuota(milliCPU int64) (quota int64, period int64) { // getStableKey generates a key (string) to uniquely identify a // (pod, container) tuple. 
The key should include the content of the // container, so that any change to the container generates a new key. -func getStableKey(pod *api.Pod, container *api.Container) string { +func getStableKey(pod *v1.Pod, container *v1.Container) string { hash := strconv.FormatUint(kubecontainer.HashContainer(container), 16) return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash) } diff --git a/pkg/kubelet/kuberuntime/helpers_test.go b/pkg/kubelet/kuberuntime/helpers_test.go index c1b2782bbdc..7338694fae1 100644 --- a/pkg/kubelet/kuberuntime/helpers_test.go +++ b/pkg/kubelet/kuberuntime/helpers_test.go @@ -20,22 +20,22 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestStableKey(t *testing.T) { - container := &api.Container{ + container := &v1.Container{ Name: "test_container", Image: "foo/image:v1", } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", }, - Spec: api.PodSpec{ - Containers: []api.Container{*container}, + Spec: v1.PodSpec{ + Containers: []v1.Container{*container}, }, } oldKey := getStableKey(pod, container) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index b781770e30a..9a2a86a4a4d 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -29,8 +29,8 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -49,7 +49,7 @@ import ( // * create the container // * start the container // * run the post start lifecycle hooks (if applicable) -func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, podIP string) (string, error) { +func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) { // Step 1: pull the image. 
err, msg := m.imagePuller.EnsureImageExists(pod, container, pullSecrets) if err != nil { @@ -72,15 +72,15 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP) if err != nil { - m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err) + m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err) return "Generate Container Config Failed", err } containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig) if err != nil { - m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err) + m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err) return "Create Container Failed", err } - m.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, "Created container with id %v", containerID) + m.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, "Created container with id %v", containerID) if ref != nil { m.containerRefManager.SetRef(kubecontainer.ContainerID{ Type: m.runtimeName, @@ -91,11 +91,11 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // Step 3: start the container. err = m.runtimeService.StartContainer(containerID) if err != nil { - m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer, + m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Failed to start container with id %v with error: %v", containerID, err) return "Start Container Failed", err } - m.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started container with id %v", containerID) + m.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started container with id %v", containerID) // Symlink container logs to the legacy container log location for cluster logging // support. @@ -119,7 +119,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart) if handlerErr != nil { err := fmt.Errorf("PostStart handler: %v", handlerErr) - m.generateContainerEvent(kubeContainerID, api.EventTypeWarning, events.FailedPostStartHook, msg) + m.generateContainerEvent(kubeContainerID, v1.EventTypeWarning, events.FailedPostStartHook, msg) m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil) return "PostStart Hook Failed", err } @@ -128,8 +128,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb return "", nil } -// generateContainerConfig generates container config for kubelet runtime api. -func (m *kubeGenericRuntimeManager) generateContainerConfig(container *api.Container, pod *api.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) { +// generateContainerConfig generates container config for kubelet runtime v1. 
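The recorder calls in this hunk change only their event-type constants: api.EventTypeWarning and api.EventTypeNormal become v1.EventTypeWarning and v1.EventTypeNormal, while the events.* reason constants stay as they are. A small illustrative helper, assuming the caller already holds a *v1.ObjectReference for the container, as the surrounding code does:

package kuberuntimeexample // illustrative package name, not part of this change

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/kubelet/events"
)

// recordContainerCreateFailed mirrors the Eventf calls above, using the
// versioned event-type constant with an unchanged event reason.
func recordContainerCreateFailed(recorder record.EventRecorder, ref *v1.ObjectReference, err error) {
	recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer,
		"Failed to create container with error: %v", err)
}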
+func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) { opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) if err != nil { return nil, err @@ -185,8 +185,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *api.Conta return config, nil } -// generateLinuxContainerConfig generates linux container config for kubelet runtime api. -func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *api.Container, pod *api.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig { +// generateLinuxContainerConfig generates linux container config for kubelet runtime v1. +func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig { lc := &runtimeApi.LinuxContainerConfig{ Resources: &runtimeApi.LinuxContainerResources{}, SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username), @@ -228,7 +228,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *api. return lc } -// makeDevices generates container devices for kubelet runtime api. +// makeDevices generates container devices for kubelet runtime v1. func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device { devices := make([]*runtimeApi.Device, len(opts.Devices)) @@ -244,8 +244,8 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device { return devices } -// makeMounts generates container volume mounts for kubelet runtime api. -func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *api.Container) []*runtimeApi.Mount { +// makeMounts generates container volume mounts for kubelet runtime v1. +func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeApi.Mount { volumeMounts := []*runtimeApi.Mount{} for idx := range opts.Mounts { @@ -416,7 +416,7 @@ func (m *kubeGenericRuntimeManager) generateContainerEvent(containerID kubeconta } // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. -func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID kubecontainer.ContainerID, containerSpec *api.Container, gracePeriod int64) int64 { +func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { glog.V(3).Infof("Running preStop hook for container %q", containerID.String()) start := unversioned.Now() @@ -426,7 +426,7 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID defer utilruntime.HandleCrash() if msg, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { glog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) - m.generateContainerEvent(containerID, api.EventTypeWarning, events.FailedPreStopHook, msg) + m.generateContainerEvent(containerID, v1.EventTypeWarning, events.FailedPreStopHook, msg) } }() @@ -448,9 +448,9 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID // TODO(random-liu): Add a node e2e test to test this behaviour. 
// TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can // just pass the needed function not create the fake object. -func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*api.Pod, *api.Container, error) { - var pod *api.Pod - var container *api.Container +func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) { + var pod *v1.Pod + var container *v1.Container s, err := m.runtimeService.ContainerStatus(containerID.ID) if err != nil { return nil, nil, err @@ -460,24 +460,24 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID a := getContainerInfoFromAnnotations(s.Annotations) // Notice that the followings are not full spec. The container killing code should not use // un-restored fields. - pod = &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod = &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: l.PodUID, Name: l.PodName, Namespace: l.PodNamespace, DeletionGracePeriodSeconds: a.PodDeletionGracePeriod, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ TerminationGracePeriodSeconds: a.PodTerminationGracePeriod, }, } - container = &api.Container{ + container = &v1.Container{ Name: l.ContainerName, Ports: a.ContainerPorts, TerminationMessagePath: a.TerminationMessagePath, } if a.PreStopHandler != nil { - container.Lifecycle = &api.Lifecycle{ + container.Lifecycle = &v1.Lifecycle{ PreStop: a.PreStopHandler, } } @@ -487,8 +487,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID // killContainer kills a container through the following steps: // * Run the pre-stop lifecycle hooks (if applicable). // * Stop the container. -func (m *kubeGenericRuntimeManager) killContainer(pod *api.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error { - var containerSpec *api.Container +func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error { + var containerSpec *v1.Container if pod != nil { containerSpec = getContainerSpec(pod, containerName) } else { @@ -534,14 +534,14 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *api.Pod, containerID kube if reason != "" { message = fmt.Sprint(message, ":", reason) } - m.generateContainerEvent(containerID, api.EventTypeNormal, events.KillingContainer, message) + m.generateContainerEvent(containerID, v1.EventTypeNormal, events.KillingContainer, message) m.containerRefManager.ClearRef(containerID) return err } // killContainersWithSyncResult kills all pod's containers with sync results. -func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) { +func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) { containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers)) wg := sync.WaitGroup{} @@ -570,7 +570,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, r // pruneInitContainers ensures that before we begin creating init containers, we have reduced the number // of outstanding init containers still present. 
This reduces load on the container garbage collector // by only preserving the most recent terminated init container. -func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) { +func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) { // only the last execution of each init container should be preserved, and only preserve it if it is in the // list of init containers to keep. initContainerNames := sets.NewString() @@ -614,7 +614,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *api.Pod, // next init container to start, or done if there are no further init containers. // Status is only returned if an init container is failed, in which case next will // point to the current container. -func findNextInitContainerToRun(pod *api.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *api.Container, done bool) { +func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *v1.Container, done bool) { if len(pod.Spec.InitContainers) == 0 { return nil, nil, true } @@ -656,7 +656,7 @@ func findNextInitContainerToRun(pod *api.Pod, podStatus *kubecontainer.PodStatus } // GetContainerLogs returns logs of a specific container. -func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) { +func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { status, err := m.runtimeService.ContainerStatus(containerID.ID) if err != nil { return fmt.Errorf("failed to get container status %q: %v", containerID, err) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go index 05bf6996ea2..0e6d4be0dda 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" ) @@ -29,18 +29,18 @@ import ( // TestRemoveContainer tests removing the container and its corresponding container logs. 
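Since findNextInitContainerToRun (above) packs its answer into a three-way return, a condensed restatement may help; this sketch assumes it lives next to that function in the kuberuntime package and is not part of the change itself:

package kuberuntime

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// describeInitProgress restates the contract of findNextInitContainerToRun:
// done means all init containers succeeded, a non-nil status means the init
// container `next` failed, otherwise `next` is the init container to start.
func describeInitProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) string {
	status, next, done := findNextInitContainerToRun(pod, podStatus)
	switch {
	case done:
		return "all init containers completed; start regular containers"
	case status != nil:
		return fmt.Sprintf("init container %q failed with exit code %d", next.Name, status.ExitCode)
	default:
		return fmt.Sprintf("start init container %q next", next.Name)
	}
}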
func TestRemoveContainer(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "bar", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, }, }, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go index 8ba08c31af0..4e371504a8a 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go @@ -24,7 +24,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -34,12 +34,12 @@ func TestSandboxGC(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - pods := []*api.Pod{ - makeTestPod("foo1", "new", "1234", []api.Container{ + pods := []*v1.Pod{ + makeTestPod("foo1", "new", "1234", []v1.Container{ makeTestContainer("bar1", "busybox"), makeTestContainer("bar2", "busybox"), }), - makeTestPod("foo2", "new", "5678", []api.Container{ + makeTestPod("foo2", "new", "5678", []v1.Container{ makeTestContainer("bar3", "busybox"), }), } @@ -129,7 +129,7 @@ func TestContainerGC(t *testing.T) { fakePodGetter := m.containerGC.podGetter.(*fakePodGetter) makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeApi.ContainerState) containerTemplate { container := makeTestContainer(containerName, "test-image") - pod := makeTestPod(podName, "test-ns", podName, []api.Container{container}) + pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container}) if podName != "deleted" { // initialize the pod getter, explicitly exclude deleted pod fakePodGetter.pods[pod.UID] = pod diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image.go b/pkg/kubelet/kuberuntime/kuberuntime_image.go index 29c57000548..1a9c98f2414 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -18,7 +18,7 @@ package kuberuntime import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/credentialprovider" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -28,7 +28,7 @@ import ( // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. 
-func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []api.Secret) error { +func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret) error { img := image.Image repoToPull, _, _, err := parsers.ParseImageName(img) if err != nil { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_logs.go b/pkg/kubelet/kuberuntime/kuberuntime_logs.go index 5c425015b31..3cc0eb17640 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_logs.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_logs.go @@ -31,7 +31,7 @@ import ( "github.com/fsnotify/fsnotify" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // Notice that the current kuberuntime logs implementation doesn't handle @@ -85,8 +85,8 @@ type logOptions struct { timestamp bool } -// newLogOptions convert the api.PodLogOptions to internal logOptions. -func newLogOptions(apiOpts *api.PodLogOptions, now time.Time) *logOptions { +// newLogOptions convert the v1.PodLogOptions to internal logOptions. +func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions { opts := &logOptions{ tail: -1, // -1 by default which means read all logs. bytes: -1, // -1 by default which means read all logs. @@ -109,14 +109,14 @@ func newLogOptions(apiOpts *api.PodLogOptions, now time.Time) *logOptions { } // ReadLogs read the container log and redirect into stdout and stderr. -func ReadLogs(path string, apiOpts *api.PodLogOptions, stdout, stderr io.Writer) error { +func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error { f, err := os.Open(path) if err != nil { return fmt.Errorf("failed to open log file %q: %v", path, err) } defer f.Close() - // Convert api.PodLogOptions into internal log options. + // Convert v1.PodLogOptions into internal log options. opts := newLogOptions(apiOpts, time.Now()) // Search start point based on tail line. 
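As a usage note for the kuberuntime_logs.go hunk above: callers hand ReadLogs the versioned v1.PodLogOptions directly, and the conversion to the internal logOptions happens inside newLogOptions. A minimal sketch of a caller; the log path is a made-up placeholder:

package main

import (
	"os"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
)

func main() {
	tail := int64(100)
	opts := &v1.PodLogOptions{TailLines: &tail} // read only the last 100 lines
	// "/var/log/containers/example.log" is a placeholder path for illustration.
	if err := kuberuntime.ReadLogs("/var/log/containers/example.log", opts, os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}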
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_logs_test.go b/pkg/kubelet/kuberuntime/kuberuntime_logs_test.go index 18538e25ebf..be27ee9f0ab 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_logs_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_logs_test.go @@ -24,8 +24,8 @@ import ( "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" ) func TestLogOptions(t *testing.T) { @@ -36,27 +36,27 @@ func TestLogOptions(t *testing.T) { sinceseconds = int64(10) ) for c, test := range []struct { - apiOpts *api.PodLogOptions + apiOpts *v1.PodLogOptions expect *logOptions }{ { // empty options - apiOpts: &api.PodLogOptions{}, + apiOpts: &v1.PodLogOptions{}, expect: &logOptions{tail: -1, bytes: -1}, }, { // test tail lines - apiOpts: &api.PodLogOptions{TailLines: &line}, + apiOpts: &v1.PodLogOptions{TailLines: &line}, expect: &logOptions{tail: line, bytes: -1}, }, { // test limit bytes - apiOpts: &api.PodLogOptions{LimitBytes: &bytes}, + apiOpts: &v1.PodLogOptions{LimitBytes: &bytes}, expect: &logOptions{tail: -1, bytes: bytes}, }, { // test since timestamp - apiOpts: &api.PodLogOptions{SinceTime: ×tamp}, + apiOpts: &v1.PodLogOptions{SinceTime: ×tamp}, expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Time}, }, { // test since seconds - apiOpts: &api.PodLogOptions{SinceSeconds: &sinceseconds}, + apiOpts: &v1.PodLogOptions{SinceSeconds: &sinceseconds}, expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Add(-10 * time.Second)}, }, } { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 587695f2df5..69cd541adb2 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -26,7 +26,7 @@ import ( "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" internalApi "k8s.io/kubernetes/pkg/kubelet/api" @@ -64,7 +64,7 @@ var ( // A subset of the pod.Manager interface extracted for garbage collection purposes. type podGetter interface { - GetPodByUID(kubetypes.UID) (*api.Pod, bool) + GetPodByUID(kubetypes.UID) (*v1.Pod, bool) } type kubeGenericRuntimeManager struct { @@ -349,7 +349,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err // containerToKillInfo contains neccessary information to kill a container. type containerToKillInfo struct { // The spec of the container. - container *api.Container + container *v1.Container // The name of the container. name string // The message indicates why the container will be killed. @@ -388,7 +388,7 @@ type podContainerSpecChanges struct { // podSandboxChanged checks whether the spec of the pod is changed and returns // (changed, new attempt, original sandboxID if exist). -func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *kubecontainer.PodStatus) (changed bool, attempt uint32, sandboxID string) { +func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (changed bool, attempt uint32, sandboxID string) { if len(podStatus.SandboxStatuses) == 0 { glog.V(2).Infof("No sandbox for pod %q can be found. 
Need to start a new one", format.Pod(pod)) return true, 0, "" @@ -420,7 +420,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *k // checkAndKeepInitContainers keeps all successfully completed init containers. If there // are failing containers, only keep the first failing one. -func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool { +func checkAndKeepInitContainers(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool { initFailed := false for i, container := range pod.Spec.InitContainers { @@ -448,7 +448,7 @@ func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus } // computePodContainerChanges checks whether the pod spec has changed and returns the changes if true. -func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges { +func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges { glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus) @@ -484,7 +484,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod continue } if sandboxChanged { - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container) glog.Info(message) changes.ContainersToStart[index] = message @@ -496,7 +496,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod // Initialization failed and Container exists. // If we have an initialization failure everything will be killed anyway. // If RestartPolicy is Always or OnFailure we restart containers that were running before. - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name) glog.V(1).Info(message) changes.ContainersToStart[index] = message @@ -519,7 +519,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod changes.ContainersToKeep[containerStatus.ID] = index continue } - if pod.Spec.RestartPolicy != api.RestartPolicyNever { + if pod.Spec.RestartPolicy != v1.RestartPolicyNever { message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name) glog.Info(message) changes.ContainersToStart[index] = message @@ -537,7 +537,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod _, keep := changes.ContainersToKeep[containerStatus.ID] _, keepInit := changes.InitContainersToKeep[containerStatus.ID] if !keep && !keepInit { - var podContainer *api.Container + var podContainer *v1.Container var killMessage string for i, c := range pod.Spec.Containers { if c.Name == containerStatus.Name { @@ -566,19 +566,19 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod // 4. Create sandbox if necessary. // 5. Create init containers. // 6. Create normal containers. 
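The repeated RestartPolicy comparisons in computePodContainerChanges above all reduce to the same versioned check; a one-line restatement, purely for illustration and not part of the change:

package kuberuntimeexample // illustrative package name

import "k8s.io/kubernetes/pkg/api/v1"

// restartableOnFailure restates the check used above: after a sandbox change,
// a failed init phase, or an unhealthy container, the container is recreated
// unless the pod's restart policy is Never.
func restartableOnFailure(pod *v1.Pod) bool {
	return pod.Spec.RestartPolicy != v1.RestartPolicyNever
}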
-func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { +func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { // Step 1: Compute sandbox and container changes. podContainerChanges := m.computePodContainerChanges(pod, podStatus) glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod)) if podContainerChanges.CreateSandbox { - ref, err := api.GetReference(pod) + ref, err := v1.GetReference(pod) if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } if podContainerChanges.SandboxID != "" { - m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.") + m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.") } else { - m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.") + m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.") } } @@ -674,7 +674,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name) initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode)) result.AddSyncResult(initContainerResult) - if pod.Spec.RestartPolicy == api.RestartPolicyNever { + if pod.Spec.RestartPolicy == v1.RestartPolicyNever { utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status)) return } @@ -745,7 +745,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt // If a container is still in backoff, the function will return a brief backoff error and // a detailed error message. 
-func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) { +func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) { var cStatus *kubecontainer.ContainerStatus for _, c := range podStatus.ContainerStatuses { if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited { @@ -765,7 +765,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Conta key := getStableKey(pod, container) if backOff.IsInBackOffSince(key, ts) { if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil { - m.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container") + m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container") } err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod)) glog.Infof("%s", err.Error()) @@ -780,14 +780,14 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Conta // gracePeriodOverride if specified allows the caller to override the pod default grace period. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. -func (m *kubeGenericRuntimeManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { +func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride) return err.Error() } // killPodWithSyncResult kills a runningPod and returns SyncResult. // Note: The pod passed in could be *nil* when kubelet restarted. -func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { +func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride) for _, containerResult := range killContainerResults { result.AddSyncResult(containerResult) @@ -808,7 +808,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *api.Pod, runningP } // isHostNetwork checks whether the pod is running in host-network mode. 
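doBackOff above keys the flowcontrol.Backoff on getStableKey(pod, container), so editing a container changes the key and effectively clears its back-off history. A compressed sketch of that check, assuming it sits in the kuberuntime package where getStableKey is visible:

package kuberuntime

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

// inBackOff is an illustrative condensation of the doBackOff check above:
// the key combines pod identity with the container's content hash.
func inBackOff(backOff *flowcontrol.Backoff, pod *v1.Pod, container *v1.Container, lastExit time.Time) bool {
	key := getStableKey(pod, container)
	return backOff.IsInBackOffSince(key, lastExit)
}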
-func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *api.Pod) (bool, error) { +func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.Pod) (bool, error) { if pod != nil { return kubecontainer.IsHostNetworkPod(pod), nil } @@ -848,8 +848,8 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp return nil, err } - podFullName := format.Pod(&api.Pod{ - ObjectMeta: api.ObjectMeta{ + podFullName := format.Pod(&v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: name, Namespace: namespace, UID: uid, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 07610e1b92d..3edaf07e405 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -24,7 +24,7 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" apitest "k8s.io/kubernetes/pkg/kubelet/api/testing" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" @@ -63,7 +63,7 @@ func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImage // sandboxTemplate is a sandbox template to create fake sandbox. type sandboxTemplate struct { - pod *api.Pod + pod *v1.Pod attempt uint32 createdAt int64 state runtimeApi.PodSandboxState @@ -71,8 +71,8 @@ type sandboxTemplate struct { // containerTemplate is a container template to create fake container. type containerTemplate struct { - pod *api.Pod - container *api.Container + pod *v1.Pod + container *v1.Container sandboxAttempt uint32 attempt int createdAt int64 @@ -82,7 +82,7 @@ type containerTemplate struct { // makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and // one fake container for each of its container. func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService, - pod *api.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) { + pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) { sandbox := makeFakePodSandbox(t, m, sandboxTemplate{ pod: pod, createdAt: fakeCreatedAt, @@ -90,7 +90,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime * }) var containers []*apitest.FakeContainer - newTemplate := func(c *api.Container) containerTemplate { + newTemplate := func(c *v1.Container) containerTemplate { return containerTemplate{ pod: pod, container: c, @@ -177,22 +177,22 @@ func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates [] } // makeTestContainer creates a test api container. -func makeTestContainer(name, image string) api.Container { - return api.Container{ +func makeTestContainer(name, image string) v1.Container { + return v1.Container{ Name: name, Image: image, } } // makeTestPod creates a test api pod. 
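For readers skimming the test changes: the fixtures are now plain v1 literals, as makeTestPod just below shows. A self-contained equivalent, with an illustrative helper name, looks like this.

// Sketch (not part of the patch): a standalone versioned fixture in the same
// shape as makeTestPod below.
package kuberuntime

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
)

// newBusyboxPod is a hypothetical helper mirroring the fixtures in these tests.
func newBusyboxPod(uid, name, namespace string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{UID: types.UID(uid), Name: name, Namespace: namespace},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:            "foo1",
				Image:           "busybox",
				ImagePullPolicy: v1.PullIfNotPresent,
			}},
		},
	}
}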
-func makeTestPod(podName, podNamespace, podUID string, containers []api.Container) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: types.UID(podUID), Name: podName, Namespace: podNamespace, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, }, } @@ -256,25 +256,25 @@ func TestGetPodStatus(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - containers := []api.Container{ + containers := []v1.Container{ { Name: "foo1", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, { Name: "foo2", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, }, } @@ -294,14 +294,14 @@ func TestGetPods(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo1", Image: "busybox", @@ -370,14 +370,14 @@ func TestGetPodContainerID(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo1", Image: "busybox", @@ -417,14 +417,14 @@ func TestGetNetNS(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo1", Image: "busybox", @@ -449,14 +449,14 @@ func TestKillPod(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo1", Image: "busybox", @@ -520,31 +520,31 @@ func TestSyncPod(t *testing.T) { fakeRuntime, fakeImage, m, err := createTestRuntimeManager() assert.NoError(t, err) - containers := []api.Container{ + containers := []v1.Container{ { Name: "foo1", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, { Name: "foo2", Image: "alpine", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, }, } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) - result := m.SyncPod(pod, api.PodStatus{}, &kubecontainer.PodStatus{}, []api.Secret{}, 
backOff) + result := m.SyncPod(pod, v1.PodStatus{}, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, 2, len(fakeRuntime.Containers)) assert.Equal(t, 2, len(fakeImage.Images)) @@ -563,14 +563,14 @@ func TestPruneInitContainers(t *testing.T) { init1 := makeTestContainer("init1", "busybox") init2 := makeTestContainer("init2", "busybox") - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - InitContainers: []api.Container{init1, init2}, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{init1, init2}, }, } @@ -598,32 +598,32 @@ func TestSyncPodWithInitContainers(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - initContainers := []api.Container{ + initContainers := []v1.Container{ { Name: "init1", Image: "init", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, } - containers := []api.Container{ + containers := []v1.Container{ { Name: "foo1", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, { Name: "foo2", Image: "alpine", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, InitContainers: initContainers, }, @@ -631,7 +631,7 @@ func TestSyncPodWithInitContainers(t *testing.T) { // buildContainerID is an internal helper function to build container id from api pod // and container with default attempt number 0. - buildContainerID := func(pod *api.Pod, container api.Container) string { + buildContainerID := func(pod *v1.Pod, container v1.Container) string { uid := string(pod.UID) sandboxID := apitest.BuildSandboxName(&runtimeApi.PodSandboxMetadata{ Name: &pod.Name, @@ -646,7 +646,7 @@ func TestSyncPodWithInitContainers(t *testing.T) { // 1. should only create the init container. podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result := m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) + result := m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, 1, len(fakeRuntime.Containers)) initContainerID := buildContainerID(pod, initContainers[0]) @@ -658,7 +658,7 @@ func TestSyncPodWithInitContainers(t *testing.T) { // 2. should not create app container because init container is still running. 
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result = m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) + result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, 1, len(fakeRuntime.Containers)) expectedContainers = []string{initContainerID} @@ -670,7 +670,7 @@ func TestSyncPodWithInitContainers(t *testing.T) { fakeRuntime.StopContainer(initContainerID, 0) podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result = m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) + result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, 3, len(fakeRuntime.Containers)) expectedContainers = []string{initContainerID, buildContainerID(pod, containers[0]), diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go index 2374ecd4e31..bfb4317f604 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go @@ -23,7 +23,7 @@ import ( "sort" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/types" @@ -32,7 +32,7 @@ import ( ) // createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error). -func (m *kubeGenericRuntimeManager) createPodSandbox(pod *api.Pod, attempt uint32) (string, string, error) { +func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) { podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt) if err != nil { message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err) @@ -58,8 +58,8 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *api.Pod, attempt uint3 return podSandBoxID, "", nil } -// generatePodSandboxConfig generates pod sandbox config from api.Pod. -func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *api.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) { +// generatePodSandboxConfig generates pod sandbox config from v1.Pod. +func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) { // TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup // Refer https://github.com/kubernetes/kubernetes/issues/29871 podUID := string(pod.UID) @@ -128,8 +128,8 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *api.Pod, attem return podSandboxConfig, nil } -// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from api.Pod. -func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *api.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig { +// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod. 
+func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig { if pod.Spec.SecurityContext == nil && cgroupParent == "" { return nil } @@ -142,9 +142,9 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *api.Pod, sc := pod.Spec.SecurityContext lc.SecurityContext = &runtimeApi.LinuxSandboxSecurityContext{ NamespaceOptions: &runtimeApi.NamespaceOption{ - HostNetwork: &sc.HostNetwork, - HostIpc: &sc.HostIPC, - HostPid: &sc.HostPID, + HostNetwork: &pod.Spec.HostNetwork, + HostIpc: &pod.Spec.HostIPC, + HostPid: &pod.Spec.HostPID, }, RunAsUser: sc.RunAsUser, } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go index b9d9e1c924f..0581a0e5388 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" ) @@ -30,18 +30,18 @@ import ( // TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory. func TestCreatePodSandbox(t *testing.T) { fakeRuntime, _, m, err := createTestRuntimeManager() - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "bar", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "foo", Image: "busybox", - ImagePullPolicy: api.PullIfNotPresent, + ImagePullPolicy: v1.PullIfNotPresent, }, }, }, diff --git a/pkg/kubelet/kuberuntime/labels.go b/pkg/kubelet/kuberuntime/labels.go index 2fd475bd7d2..88e383d21bd 100644 --- a/pkg/kubelet/kuberuntime/labels.go +++ b/pkg/kubelet/kuberuntime/labels.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -45,7 +45,7 @@ const ( ) type labeledPodSandboxInfo struct { - // Labels from api.Pod + // Labels from v1.Pod Labels map[string]string PodName string PodNamespace string @@ -53,7 +53,7 @@ type labeledPodSandboxInfo struct { } type annotatedPodSandboxInfo struct { - // Annotations from api.Pod + // Annotations from v1.Pod Annotations map[string]string } @@ -70,15 +70,15 @@ type annotatedContainerInfo struct { PodDeletionGracePeriod *int64 PodTerminationGracePeriod *int64 TerminationMessagePath string - PreStopHandler *api.Handler - ContainerPorts []api.ContainerPort + PreStopHandler *v1.Handler + ContainerPorts []v1.ContainerPort } -// newPodLabels creates pod labels from api.Pod. -func newPodLabels(pod *api.Pod) map[string]string { +// newPodLabels creates pod labels from v1.Pod. +func newPodLabels(pod *v1.Pod) map[string]string { labels := map[string]string{} - // Get labels from api.Pod + // Get labels from v1.Pod for k, v := range pod.Labels { labels[k] = v } @@ -91,13 +91,13 @@ func newPodLabels(pod *api.Pod) map[string]string { return labels } -// newPodAnnotations creates pod annotations from api.Pod. -func newPodAnnotations(pod *api.Pod) map[string]string { +// newPodAnnotations creates pod annotations from v1.Pod. 
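On the labels.go changes above: newPodLabels copies the pod's own labels and then adds the kubelet's bookkeeping keys, and getPodSandboxInfoFromLabels (further below) strips them back out. A rough sketch of that filtering step, assuming it lives in the same package so the private kubernetesManagedLabel constant is visible; userLabels is an illustrative name.

// Sketch (not part of the patch): the label filtering mirrored from
// getPodSandboxInfoFromLabels further below.
package kuberuntime

import "k8s.io/kubernetes/pkg/kubelet/types"

// userLabels returns only the labels that originated on the v1.Pod, dropping
// the kubelet bookkeeping keys that newPodLabels adds.
func userLabels(labels map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range labels {
		if k == types.KubernetesPodNameLabel || k == types.KubernetesPodNamespaceLabel ||
			k == types.KubernetesPodUIDLabel || k == kubernetesManagedLabel {
			continue // kubelet-internal bookkeeping
		}
		out[k] = v
	}
	return out
}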
+func newPodAnnotations(pod *v1.Pod) map[string]string { return pod.Annotations } -// newContainerLabels creates container labels from api.Container and api.Pod. -func newContainerLabels(container *api.Container, pod *api.Pod) map[string]string { +// newContainerLabels creates container labels from v1.Container and v1.Pod. +func newContainerLabels(container *v1.Container, pod *v1.Pod) map[string]string { labels := map[string]string{} labels[types.KubernetesPodNameLabel] = pod.Name labels[types.KubernetesPodNamespaceLabel] = pod.Namespace @@ -108,8 +108,8 @@ func newContainerLabels(container *api.Container, pod *api.Pod) map[string]strin return labels } -// newContainerAnnotations creates container annotations from api.Container and api.Pod. -func newContainerAnnotations(container *api.Container, pod *api.Pod, restartCount int) map[string]string { +// newContainerAnnotations creates container annotations from v1.Container and v1.Pod. +func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int) map[string]string { annotations := map[string]string{} annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16) annotations[containerRestartCountLabel] = strconv.Itoa(restartCount) @@ -153,7 +153,7 @@ func getPodSandboxInfoFromLabels(labels map[string]string) *labeledPodSandboxInf PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)), } - // Remain only labels from api.Pod + // Remain only labels from v1.Pod for k, v := range labels { if k != types.KubernetesPodNameLabel && k != types.KubernetesPodNamespaceLabel && k != types.KubernetesPodUIDLabel && k != kubernetesManagedLabel { podSandboxInfo.Labels[k] = v @@ -209,14 +209,14 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo glog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err) } - preStopHandler := &api.Handler{} + preStopHandler := &v1.Handler{} if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil { glog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err) } else if found { containerInfo.PreStopHandler = preStopHandler } - containerPorts := []api.ContainerPort{} + containerPorts := []v1.ContainerPort{} if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil { glog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err) } else if found { diff --git a/pkg/kubelet/kuberuntime/labels_test.go b/pkg/kubelet/kuberuntime/labels_test.go index 0ece3f4843c..5c52ec49a52 100644 --- a/pkg/kubelet/kuberuntime/labels_test.go +++ b/pkg/kubelet/kuberuntime/labels_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/intstr" ) @@ -28,37 +28,37 @@ import ( func TestContainerLabels(t *testing.T) { deletionGracePeriod := int64(10) terminationGracePeriod := int64(10) - lifecycle := &api.Lifecycle{ + lifecycle := &v1.Lifecycle{ // Left PostStart as nil - PreStop: &api.Handler{ - Exec: &api.ExecAction{ + PreStop: &v1.Handler{ + Exec: &v1.ExecAction{ Command: []string{"action1", "action2"}, }, - HTTPGet: &api.HTTPGetAction{ + HTTPGet: &v1.HTTPGetAction{ Path: "path", Host: "host", Port: intstr.FromInt(8080), Scheme: "scheme", }, - TCPSocket: &api.TCPSocketAction{ + 
TCPSocket: &v1.TCPSocketAction{ Port: intstr.FromString("80"), }, }, } - container := &api.Container{ + container := &v1.Container{ Name: "test_container", TerminationMessagePath: "/somepath", Lifecycle: lifecycle, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", DeletionGracePeriodSeconds: &deletionGracePeriod, }, - Spec: api.PodSpec{ - Containers: []api.Container{*container}, + Spec: v1.PodSpec{ + Containers: []v1.Container{*container}, TerminationGracePeriodSeconds: &terminationGracePeriod, }, } @@ -81,52 +81,52 @@ func TestContainerAnnotations(t *testing.T) { restartCount := 5 deletionGracePeriod := int64(10) terminationGracePeriod := int64(10) - lifecycle := &api.Lifecycle{ + lifecycle := &v1.Lifecycle{ // Left PostStart as nil - PreStop: &api.Handler{ - Exec: &api.ExecAction{ + PreStop: &v1.Handler{ + Exec: &v1.ExecAction{ Command: []string{"action1", "action2"}, }, - HTTPGet: &api.HTTPGetAction{ + HTTPGet: &v1.HTTPGetAction{ Path: "path", Host: "host", Port: intstr.FromInt(8080), Scheme: "scheme", }, - TCPSocket: &api.TCPSocketAction{ + TCPSocket: &v1.TCPSocketAction{ Port: intstr.FromString("80"), }, }, } - containerPorts := []api.ContainerPort{ + containerPorts := []v1.ContainerPort{ { Name: "http", HostPort: 80, ContainerPort: 8080, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }, { Name: "https", HostPort: 443, ContainerPort: 6443, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }, } - container := &api.Container{ + container := &v1.Container{ Name: "test_container", Ports: containerPorts, TerminationMessagePath: "/somepath", Lifecycle: lifecycle, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", DeletionGracePeriodSeconds: &deletionGracePeriod, }, - Spec: api.PodSpec{ - Containers: []api.Container{*container}, + Spec: v1.PodSpec{ + Containers: []v1.Container{*container}, TerminationGracePeriodSeconds: &terminationGracePeriod, }, } @@ -165,15 +165,15 @@ func TestContainerAnnotations(t *testing.T) { } func TestPodLabels(t *testing.T) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", Labels: map[string]string{"foo": "bar"}, }, - Spec: api.PodSpec{ - Containers: []api.Container{}, + Spec: v1.PodSpec{ + Containers: []v1.Container{}, }, } expected := &labeledPodSandboxInfo{ @@ -192,15 +192,15 @@ func TestPodLabels(t *testing.T) { } func TestPodAnnotations(t *testing.T) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test_pod", Namespace: "test_pod_namespace", UID: "test_pod_uid", Annotations: map[string]string{"foo": "bar"}, }, - Spec: api.PodSpec{ - Containers: []api.Container{}, + Spec: v1.PodSpec{ + Containers: []v1.Container{}, }, } expected := &annotatedPodSandboxInfo{ diff --git a/pkg/kubelet/kuberuntime/security_context.go b/pkg/kubelet/kuberuntime/security_context.go index fc5940dee5c..80da407900b 100644 --- a/pkg/kubelet/kuberuntime/security_context.go +++ b/pkg/kubelet/kuberuntime/security_context.go @@ -19,13 +19,13 @@ package kuberuntime import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" "k8s.io/kubernetes/pkg/securitycontext" ) -// 
determineEffectiveSecurityContext gets container's security context from api.Pod and api.Container. -func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *api.Pod, container *api.Container, uid *int64, username *string) *runtimeapi.LinuxContainerSecurityContext { +// determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container. +func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username *string) *runtimeapi.LinuxContainerSecurityContext { effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container) synthesized := convertToRuntimeSecurityContext(effectiveSc) if synthesized == nil { @@ -44,9 +44,9 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *api.P return synthesized } synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{ - HostNetwork: &podSc.HostNetwork, - HostIpc: &podSc.HostIPC, - HostPid: &podSc.HostPID, + HostNetwork: &pod.Spec.HostNetwork, + HostIpc: &pod.Spec.HostIPC, + HostPid: &pod.Spec.HostPID, } if podSc.FSGroup != nil { synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup) @@ -62,7 +62,7 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *api.P } // verifyRunAsNonRoot verifies RunAsNonRoot. -func verifyRunAsNonRoot(pod *api.Pod, container *api.Container, uid int64) error { +func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid int64) error { effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container) if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil { return nil @@ -82,8 +82,8 @@ func verifyRunAsNonRoot(pod *api.Pod, container *api.Container, uid int64) error return nil } -// convertToRuntimeSecurityContext converts api.SecurityContext to runtimeapi.SecurityContext. -func convertToRuntimeSecurityContext(securityContext *api.SecurityContext) *runtimeapi.LinuxContainerSecurityContext { +// convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext. +func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runtimeapi.LinuxContainerSecurityContext { if securityContext == nil { return nil } @@ -97,8 +97,8 @@ func convertToRuntimeSecurityContext(securityContext *api.SecurityContext) *runt } } -// convertToRuntimeSELinuxOption converts api.SELinuxOptions to runtimeapi.SELinuxOption. -func convertToRuntimeSELinuxOption(opts *api.SELinuxOptions) *runtimeapi.SELinuxOption { +// convertToRuntimeSELinuxOption converts v1.SELinuxOptions to runtimeapi.SELinuxOption. +func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxOption { if opts == nil { return nil } @@ -111,8 +111,8 @@ func convertToRuntimeSELinuxOption(opts *api.SELinuxOptions) *runtimeapi.SELinux } } -// convertToRuntimeCapabilities converts api.Capabilities to runtimeapi.Capability. -func convertToRuntimeCapabilities(opts *api.Capabilities) *runtimeapi.Capability { +// convertToRuntimeCapabilities converts v1.Capabilities to runtimeapi.Capability. 
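The substantive change in determineEffectiveSecurityContext above (and in generatePodSandboxLinuxConfig earlier) is that the host namespace flags are now read from pod.Spec rather than from the pod-level security context, since v1 exposes HostNetwork/HostIPC/HostPID as PodSpec fields. A minimal sketch of that derivation; namespaceOptionsFor is an illustrative name, not part of the patch.

// Sketch (not part of the patch): deriving CRI namespace options from v1 PodSpec fields.
package kuberuntime

import (
	"k8s.io/kubernetes/pkg/api/v1"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// namespaceOptionsFor reads the host-namespace flags directly off pod.Spec,
// which is where the v1 API keeps them.
func namespaceOptionsFor(pod *v1.Pod) *runtimeapi.NamespaceOption {
	return &runtimeapi.NamespaceOption{
		HostNetwork: &pod.Spec.HostNetwork,
		HostIpc:     &pod.Spec.HostIPC,
		HostPid:     &pod.Spec.HostPID,
	}
}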
+func convertToRuntimeCapabilities(opts *v1.Capabilities) *runtimeapi.Capability { if opts == nil { return nil } diff --git a/pkg/kubelet/lifecycle/fake_handler_runner.go b/pkg/kubelet/lifecycle/fake_handler_runner.go index 3979204e425..1ebc5479ab0 100644 --- a/pkg/kubelet/lifecycle/fake_handler_runner.go +++ b/pkg/kubelet/lifecycle/fake_handler_runner.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" ) @@ -35,7 +35,7 @@ func NewFakeHandlerRunner() *FakeHandlerRunner { return &FakeHandlerRunner{HandlerRuns: []string{}} } -func (hr *FakeHandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { +func (hr *FakeHandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.Handler) (string, error) { hr.Lock() defer hr.Unlock() diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index 1d6bc1c1f79..f2ba6e71277 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -24,7 +24,7 @@ import ( "strconv" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -51,7 +51,7 @@ func NewHandlerRunner(httpGetter kubetypes.HttpGetter, commandRunner kubecontain } } -func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { +func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.Handler) (string, error) { switch { case handler.Exec != nil: var msg string @@ -83,7 +83,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod // an attempt is made to find a port with the same name in the container spec. // If a port with the same name is found, it's ContainerPort value is returned. If no matching // port is found, an error is returned. -func resolvePort(portReference intstr.IntOrString, container *api.Container) (int, error) { +func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int, error) { if portReference.Type == intstr.Int { return portReference.IntValue(), nil } @@ -100,7 +100,7 @@ func resolvePort(portReference intstr.IntOrString, container *api.Container) (in return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) } -func (hr *HandlerRunner) runHTTPHandler(pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { +func (hr *HandlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, handler *v1.Handler) (string, error) { host := handler.HTTPGet.Host if len(host) == 0 { status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) @@ -151,7 +151,7 @@ type appArmorAdmitHandler struct { func (a *appArmorAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult { // If the pod is already running or terminated, no need to recheck AppArmor. 
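On the handler changes above: resolvePort keeps its behavior across the type switch, integer ports pass straight through and named ports are looked up in the container's port list (the tests below exercise both paths). An illustrative usage sketch, assuming it runs inside the lifecycle package where resolvePort is visible.

// Sketch (not part of the patch): how resolvePort behaves with v1 types.
package lifecycle

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func exampleResolvePort() {
	container := &v1.Container{
		Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}},
	}
	byName, _ := resolvePort(intstr.FromString("http"), container) // 8080, via the named port
	byInt, _ := resolvePort(intstr.FromInt(9090), container)       // 9090, returned as-is
	fmt.Println(byName, byInt)
}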
- if attrs.Pod.Status.Phase != api.PodPending { + if attrs.Pod.Status.Phase != v1.PodPending { return PodAdmitResult{Admit: true} } diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go index 23c101e9224..bb238dc2bea 100644 --- a/pkg/kubelet/lifecycle/handlers_test.go +++ b/pkg/kubelet/lifecycle/handlers_test.go @@ -25,14 +25,14 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/intstr" ) func TestResolvePortInt(t *testing.T) { expected := 80 - port, err := resolvePort(intstr.FromInt(expected), &api.Container{}) + port, err := resolvePort(intstr.FromInt(expected), &v1.Container{}) if port != expected { t.Errorf("expected: %d, saw: %d", expected, port) } @@ -44,8 +44,8 @@ func TestResolvePortInt(t *testing.T) { func TestResolvePortString(t *testing.T) { expected := 80 name := "foo" - container := &api.Container{ - Ports: []api.ContainerPort{ + container := &v1.Container{ + Ports: []v1.ContainerPort{ {Name: name, ContainerPort: int32(expected)}, }, } @@ -61,8 +61,8 @@ func TestResolvePortString(t *testing.T) { func TestResolvePortStringUnknown(t *testing.T) { expected := int32(80) name := "foo" - container := &api.Container{ - Ports: []api.ContainerPort{ + container := &v1.Container{ + Ports: []v1.ContainerPort{ {Name: "bar", ContainerPort: expected}, }, } @@ -93,21 +93,21 @@ func TestRunHandlerExec(t *testing.T) { containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} containerName := "containerFoo" - container := api.Container{ + container := v1.Container{ Name: containerName, - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - Exec: &api.ExecAction{ + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + Exec: &v1.ExecAction{ Command: []string{"ls", "-a"}, }, }, }, } - pod := api.Pod{} + pod := v1.Pod{} pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" - pod.Spec.Containers = []api.Container{container} + pod.Spec.Containers = []v1.Container{container} _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) @@ -136,11 +136,11 @@ func TestRunHandlerHttp(t *testing.T) { containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} containerName := "containerFoo" - container := api.Container{ + container := v1.Container{ Name: containerName, - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - HTTPGet: &api.HTTPGetAction{ + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Host: "foo", Port: intstr.FromInt(8080), Path: "bar", @@ -148,10 +148,10 @@ func TestRunHandlerHttp(t *testing.T) { }, }, } - pod := api.Pod{} + pod := v1.Pod{} pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" - pod.Spec.Containers = []api.Container{container} + pod.Spec.Containers = []v1.Container{container} _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { @@ -169,16 +169,16 @@ func TestRunHandlerNil(t *testing.T) { podNamespace := "nsFoo" containerName := "containerFoo" - container := api.Container{ + container := v1.Container{ Name: containerName, - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{}, + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{}, }, } - pod := api.Pod{} + pod := v1.Pod{} pod.ObjectMeta.Name = podName pod.ObjectMeta.Namespace = podNamespace - pod.Spec.Containers = []api.Container{container} + 
pod.Spec.Containers = []v1.Container{container} _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expect error, but got nil") @@ -194,11 +194,11 @@ func TestRunHandlerHttpFailure(t *testing.T) { handlerRunner := NewHandlerRunner(&fakeHttp, &fakeContainerCommandRunner{}, nil) containerName := "containerFoo" containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} - container := api.Container{ + container := v1.Container{ Name: containerName, - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - HTTPGet: &api.HTTPGetAction{ + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Host: "foo", Port: intstr.FromInt(8080), Path: "bar", @@ -206,10 +206,10 @@ func TestRunHandlerHttpFailure(t *testing.T) { }, }, } - pod := api.Pod{} + pod := v1.Pod{} pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" - pod.Spec.Containers = []api.Container{container} + pod.Spec.Containers = []v1.Container{container} msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expected error: %v", expectedErr) diff --git a/pkg/kubelet/lifecycle/interfaces.go b/pkg/kubelet/lifecycle/interfaces.go index 38ee01825fe..bde0e51e995 100644 --- a/pkg/kubelet/lifecycle/interfaces.go +++ b/pkg/kubelet/lifecycle/interfaces.go @@ -16,15 +16,15 @@ limitations under the License. package lifecycle -import "k8s.io/kubernetes/pkg/api" +import "k8s.io/kubernetes/pkg/api/v1" // PodAdmitAttributes is the context for a pod admission decision. // The member fields of this struct should never be mutated. type PodAdmitAttributes struct { // the pod to evaluate for admission - Pod *api.Pod + Pod *v1.Pod // all pods bound to the kubelet excluding the pod being evaluated - OtherPods []*api.Pod + OtherPods []*v1.Pod } // PodAdmitResult provides the result of a pod admission decision. @@ -54,7 +54,7 @@ type PodSyncLoopHandler interface { // ShouldSync returns true if the pod needs to be synced. // This operation must return immediately as its called for each pod. // The provided pod should never be modified. - ShouldSync(pod *api.Pod) bool + ShouldSync(pod *v1.Pod) bool } // PodSyncLoopTarget maintains a list of handlers to pod sync loop. @@ -81,7 +81,7 @@ type PodSyncHandler interface { // and the pod is immediately killed. // This operation must return immediately as its called for each sync pod. // The provided pod should never be modified. - ShouldEvict(pod *api.Pod) ShouldEvictResponse + ShouldEvict(pod *v1.Pod) ShouldEvictResponse } // PodSyncTarget maintains a list of handlers to pod sync. 
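Since the lifecycle interfaces above now take *v1.Pod, handlers are written against the versioned type. A minimal sketch of a PodSyncLoopHandler that never requests a sync; the type name is illustrative and not part of the patch.

// Sketch (not part of the patch): implementing the versioned PodSyncLoopHandler.
package lifecycle

import "k8s.io/kubernetes/pkg/api/v1"

// noopSyncHandler is a hypothetical handler showing the post-migration
// signature: ShouldSync receives a *v1.Pod.
type noopSyncHandler struct{}

func (h *noopSyncHandler) ShouldSync(pod *v1.Pod) bool {
	// A real handler would inspect versioned fields, e.g. pod.Spec.ActiveDeadlineSeconds.
	return false
}

// compile-time check that the sketch satisfies the interface above
var _ PodSyncLoopHandler = &noopSyncHandler{}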
diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index e38e0b6e1ff..6c8f24b7eff 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -20,13 +20,13 @@ import ( "fmt" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -type getNodeAnyWayFuncType func() (*api.Node, error) +type getNodeAnyWayFuncType func() (*v1.Node, error) type predicateAdmitHandler struct { getNodeAnyWayFunc getNodeAnyWayFuncType } diff --git a/pkg/kubelet/network/cni/cni_test.go b/pkg/kubelet/network/cni/cni_test.go index 8aeed7a93fd..6e932f32054 100644 --- a/pkg/kubelet/network/cni/cni_test.go +++ b/pkg/kubelet/network/cni/cni_test.go @@ -29,11 +29,11 @@ import ( "testing" "text/template" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" cnitypes "github.com/containernetworking/cni/pkg/types" "github.com/stretchr/testify/mock" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -126,7 +126,7 @@ func NewFakeHost(kubeClient clientset.Interface, pods []*containertest.FakePod) return host } -func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { +func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) { return nil, false } diff --git a/pkg/kubelet/network/hostport/hostport.go b/pkg/kubelet/network/hostport/hostport.go index 35d4ac2c8ea..273dab10b5c 100644 --- a/pkg/kubelet/network/hostport/hostport.go +++ b/pkg/kubelet/network/hostport/hostport.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" utildbus "k8s.io/kubernetes/pkg/util/dbus" @@ -47,7 +47,7 @@ type HostportHandler interface { } type ActivePod struct { - Pod *api.Pod + Pod *v1.Pod IP net.IP } @@ -87,7 +87,7 @@ func (hp *hostport) String() string { } //openPodHostports opens all hostport for pod and returns the map of hostport and socket -func (h *handler) openHostports(pod *api.Pod) error { +func (h *handler) openHostports(pod *v1.Pod) error { var retErr error ports := make(map[hostport]closeable) for _, container := range pod.Spec.Containers { @@ -131,15 +131,15 @@ func (h *handler) openHostports(pod *api.Pod) error { // gatherAllHostports returns all hostports that should be presented on node, // given the list of pods running on that node and ignoring host network // pods (which don't need hostport <-> container port mapping). 
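The hostport gathering described above skips host-network pods, and with v1 types that check reads pod.Spec.HostNetwork directly (see the hunk that follows). For orientation, an illustrative construction of the ActivePod input; activePodsFor is a hypothetical helper, not part of the patch.

// Sketch (not part of the patch): building the ActivePod slice with v1 pods.
package hostport

import (
	"net"

	"k8s.io/kubernetes/pkg/api/v1"
)

// activePodsFor pairs versioned pods with their IPs, skipping host-network
// pods up front the same way gatherAllHostports does via pod.Spec.HostNetwork.
func activePodsFor(pods []*v1.Pod, ips map[string]net.IP) []*ActivePod {
	var active []*ActivePod
	for _, p := range pods {
		if p.Spec.HostNetwork {
			continue // no hostport mapping needed for host-network pods
		}
		active = append(active, &ActivePod{Pod: p, IP: ips[string(p.UID)]})
	}
	return active
}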
-func gatherAllHostports(activePods []*ActivePod) (map[api.ContainerPort]targetPod, error) { - podHostportMap := make(map[api.ContainerPort]targetPod) +func gatherAllHostports(activePods []*ActivePod) (map[v1.ContainerPort]targetPod, error) { + podHostportMap := make(map[v1.ContainerPort]targetPod) for _, r := range activePods { if r.IP.To4() == nil { return nil, fmt.Errorf("Invalid or missing pod %s IP", kubecontainer.GetPodFullName(r.Pod)) } // should not handle hostports for hostnetwork pods - if r.Pod.Spec.SecurityContext != nil && r.Pod.Spec.SecurityContext.HostNetwork { + if r.Pod.Spec.HostNetwork { continue } @@ -164,7 +164,7 @@ func writeLine(buf *bytes.Buffer, words ...string) { // then encoding to base32 and truncating with the prefix "KUBE-SVC-". We do // this because IPTables Chain Names must be <= 28 chars long, and the longer // they are the harder they are to read. -func hostportChainName(cp api.ContainerPort, podFullName string) utiliptables.Chain { +func hostportChainName(cp v1.ContainerPort, podFullName string) utiliptables.Chain { hash := sha256.Sum256([]byte(string(cp.HostPort) + string(cp.Protocol) + podFullName)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) @@ -364,7 +364,7 @@ func openLocalPort(hp *hostport) (closeable, error) { } // cleanupHostportMap closes obsolete hostports -func (h *handler) cleanupHostportMap(containerPortMap map[api.ContainerPort]targetPod) { +func (h *handler) cleanupHostportMap(containerPortMap map[v1.ContainerPort]targetPod) { // compute hostports that are supposed to be open currentHostports := make(map[hostport]bool) for containerPort := range containerPortMap { diff --git a/pkg/kubelet/network/hostport/hostport_test.go b/pkg/kubelet/network/hostport/hostport_test.go index 0fc4cf85e17..25cbcc24736 100644 --- a/pkg/kubelet/network/hostport/hostport_test.go +++ b/pkg/kubelet/network/hostport/hostport_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -61,27 +61,27 @@ func TestOpenPodHostports(t *testing.T) { } tests := []struct { - pod *api.Pod + pod *v1.Pod ip string matches []*ruleMatch }{ // New pod that we are going to add { - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "test-pod", - Namespace: api.NamespaceDefault, + Namespace: v1.NamespaceDefault, }, - Spec: api.PodSpec{ - Containers: []api.Container{{ - Ports: []api.ContainerPort{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ HostPort: 4567, ContainerPort: 80, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }, { HostPort: 5678, ContainerPort: 81, - Protocol: api.ProtocolUDP, + Protocol: v1.ProtocolUDP, }}, }}, }, @@ -122,17 +122,17 @@ func TestOpenPodHostports(t *testing.T) { }, // Already running pod { - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "another-test-pod", - Namespace: api.NamespaceDefault, + Namespace: v1.NamespaceDefault, }, - Spec: api.PodSpec{ - Containers: []api.Container{{ - Ports: []api.ContainerPort{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ HostPort: 123, ContainerPort: 654, - Protocol: api.ProtocolTCP, + Protocol: v1.ProtocolTCP, }}, }}, }, diff --git a/pkg/kubelet/network/kubenet/kubenet_linux.go b/pkg/kubelet/network/kubenet/kubenet_linux.go 
index 7679dce6d76..be5495ee371 100644 --- a/pkg/kubelet/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -27,13 +27,14 @@ import ( "syscall" "time" + "io/ioutil" + "github.com/containernetworking/cni/libcni" cnitypes "github.com/containernetworking/cni/pkg/types" "github.com/golang/glog" "github.com/vishvananda/netlink" "github.com/vishvananda/netlink/nl" - "io/ioutil" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" @@ -337,7 +338,7 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int { // setup sets up networking through CNI using the given ns/name and sandbox ID. // TODO: Don't pass the pod to this method, it only needs it for bandwidth // shaping and hostport management. -func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, pod *api.Pod) error { +func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, pod *v1.Pod) error { // Bring up container loopback interface if _, err := plugin.addContainerToNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil { return err diff --git a/pkg/kubelet/network/plugins.go b/pkg/kubelet/network/plugins.go index 9e7a1b65c26..651c9eceab2 100644 --- a/pkg/kubelet/network/plugins.go +++ b/pkg/kubelet/network/plugins.go @@ -21,11 +21,11 @@ import ( "net" "strings" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" @@ -102,7 +102,7 @@ type PodNetworkStatus struct { type LegacyHost interface { // Get the pod structure by its name, namespace // Only used for hostport management and bw shaping - GetPodByName(namespace, name string) (*api.Pod, bool) + GetPodByName(namespace, name string) (*v1.Pod, bool) // GetKubeClient returns a client interface // Only used in testing diff --git a/pkg/kubelet/network/testing/fake_host.go b/pkg/kubelet/network/testing/fake_host.go index a650be1c2e1..5b278249902 100644 --- a/pkg/kubelet/network/testing/fake_host.go +++ b/pkg/kubelet/network/testing/fake_host.go @@ -20,8 +20,8 @@ package testing // a fake host is created here that can be used by plugins for testing import ( - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" ) @@ -38,7 +38,7 @@ func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost { return host } -func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { +func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) { return nil, false } diff --git a/pkg/kubelet/networks.go b/pkg/kubelet/networks.go index 74cfcb6427c..1b258e0ecea 100644 --- a/pkg/kubelet/networks.go +++ b/pkg/kubelet/networks.go @@ -17,8 +17,8 @@ limitations under the License. 
package kubelet import ( - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -33,7 +33,7 @@ type networkHost struct { kubelet *Kubelet } -func (nh *networkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { +func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) { return nh.kubelet.GetPodByName(name, namespace) } @@ -71,7 +71,7 @@ func (c *criNetworkHost) GetNetNS(containerID string) (string, error) { // like host port and bandwidth shaping. type noOpLegacyHost struct{} -func (n *noOpLegacyHost) GetPodByName(namespace, name string) (*api.Pod, bool) { +func (n *noOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) { return nil, true } diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom_watcher.go index ca4c53d9432..be18463eda2 100644 --- a/pkg/kubelet/oom_watcher.go +++ b/pkg/kubelet/oom_watcher.go @@ -20,15 +20,15 @@ import ( "github.com/golang/glog" "github.com/google/cadvisor/events" cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/util/runtime" ) type OOMWatcher interface { - Start(ref *api.ObjectReference) error + Start(ref *v1.ObjectReference) error } type realOOMWatcher struct { @@ -46,7 +46,7 @@ func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) O const systemOOMEvent = "SystemOOM" // Watches cadvisor for system oom's and records an event for every system oom encountered. 
-func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error { +func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error { request := events.Request{ EventType: map[cadvisorapi.EventType]bool{ cadvisorapi.EventOom: true, @@ -64,7 +64,7 @@ func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error { for event := range eventChannel.GetChannel() { glog.V(2).Infof("Got sys oom event from cadvisor: %v", event) - ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered") + ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered") } glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor") }() diff --git a/pkg/kubelet/oom_watcher_test.go b/pkg/kubelet/oom_watcher_test.go index e8915e627ac..6d43bb05d45 100644 --- a/pkg/kubelet/oom_watcher_test.go +++ b/pkg/kubelet/oom_watcher_test.go @@ -19,7 +19,7 @@ package kubelet import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" ) @@ -27,7 +27,7 @@ import ( func TestBasic(t *testing.T) { fakeRecorder := &record.FakeRecorder{} mockCadvisor := &cadvisortest.Fake{} - node := &api.ObjectReference{} + node := &v1.ObjectReference{} oomWatcher := NewOOMWatcher(mockCadvisor, fakeRecorder) err := oomWatcher.Start(node) if err != nil { diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go index a57d6414d22..071e7ff07b9 100644 --- a/pkg/kubelet/pod/mirror_client.go +++ b/pkg/kubelet/pod/mirror_client.go @@ -18,9 +18,9 @@ package pod import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -31,7 +31,7 @@ type MirrorClient interface { // pod or returns an error. The mirror pod will have the same annotations // as the given pod as well as an extra annotation containing the hash of // the static pod. - CreateMirrorPod(pod *api.Pod) error + CreateMirrorPod(pod *v1.Pod) error // DeleteMirrorPod deletes the mirror pod with the given full name from // the API server or returns an error. 
DeleteMirrorPod(podFullName string) error @@ -49,7 +49,7 @@ func NewBasicMirrorClient(apiserverClient clientset.Interface) MirrorClient { return &basicMirrorClient{apiserverClient: apiserverClient} } -func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error { +func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { if mc.apiserverClient == nil { return nil } @@ -83,28 +83,28 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { } glog.V(2).Infof("Deleting a mirror pod %q", podFullName) // TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager - if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { + if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) } return nil } -func IsStaticPod(pod *api.Pod) bool { +func IsStaticPod(pod *v1.Pod) bool { source, err := kubetypes.GetPodSource(pod) return err == nil && source != kubetypes.ApiserverSource } -func IsMirrorPod(pod *api.Pod) bool { +func IsMirrorPod(pod *v1.Pod) bool { _, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] return ok } -func getHashFromMirrorPod(pod *api.Pod) (string, bool) { +func getHashFromMirrorPod(pod *v1.Pod) (string, bool) { hash, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] return hash, ok } -func getPodHash(pod *api.Pod) string { +func getPodHash(pod *v1.Pod) string { // The annotation exists for all static pods. return pod.Annotations[kubetypes.ConfigHashAnnotationKey] } diff --git a/pkg/kubelet/pod/pod_manager.go b/pkg/kubelet/pod/pod_manager.go index 712a37d867b..800887e23e2 100644 --- a/pkg/kubelet/pod/pod_manager.go +++ b/pkg/kubelet/pod/pod_manager.go @@ -19,7 +19,7 @@ package pod import ( "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" ) @@ -41,35 +41,35 @@ import ( // will also be removed. type Manager interface { // GetPods returns the regular pods bound to the kubelet and their spec. - GetPods() []*api.Pod + GetPods() []*v1.Pod // GetPodByName returns the (non-mirror) pod that matches full name, as well as // whether the pod was found. - GetPodByFullName(podFullName string) (*api.Pod, bool) + GetPodByFullName(podFullName string) (*v1.Pod, bool) // GetPodByName provides the (non-mirror) pod that matches namespace and // name, as well as whether the pod was found. - GetPodByName(namespace, name string) (*api.Pod, bool) + GetPodByName(namespace, name string) (*v1.Pod, bool) // GetPodByUID provides the (non-mirror) pod that matches pod UID, as well as // whether the pod is found. - GetPodByUID(types.UID) (*api.Pod, bool) + GetPodByUID(types.UID) (*v1.Pod, bool) // GetPodByMirrorPod returns the static pod for the given mirror pod and // whether it was known to the pod manger. - GetPodByMirrorPod(*api.Pod) (*api.Pod, bool) + GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool) // GetMirrorPodByPod returns the mirror pod for the given static pod and // whether it was known to the pod manager. - GetMirrorPodByPod(*api.Pod) (*api.Pod, bool) + GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool) // GetPodsAndMirrorPods returns the both regular and mirror pods. - GetPodsAndMirrorPods() ([]*api.Pod, []*api.Pod) + GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) // SetPods replaces the internal pods with the new pods. 
// It is currently only used for testing. - SetPods(pods []*api.Pod) + SetPods(pods []*v1.Pod) // AddPod adds the given pod to the manager. - AddPod(pod *api.Pod) + AddPod(pod *v1.Pod) // UpdatePod updates the given pod in the manager. - UpdatePod(pod *api.Pod) + UpdatePod(pod *v1.Pod) // DeletePod deletes the given pod from the manager. For mirror pods, // this means deleting the mappings related to mirror pods. For non- // mirror pods, this means deleting from indexes for all non-mirror pods. - DeletePod(pod *api.Pod) + DeletePod(pod *v1.Pod) // DeleteOrphanedMirrorPods deletes all mirror pods which do not have // associated static pods. This method sends deletion requests to the API // server, but does NOT modify the internal pod storage in basicManager. @@ -87,7 +87,7 @@ type Manager interface { GetUIDTranslations() (podToMirror, mirrorToPod map[types.UID]types.UID) // IsMirrorPodOf returns true if mirrorPod is a correct representation of // pod; false otherwise. - IsMirrorPodOf(mirrorPod, pod *api.Pod) bool + IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool MirrorClient } @@ -101,13 +101,13 @@ type basicManager struct { lock sync.RWMutex // Regular pods indexed by UID. - podByUID map[types.UID]*api.Pod + podByUID map[types.UID]*v1.Pod // Mirror pods indexed by UID. - mirrorPodByUID map[types.UID]*api.Pod + mirrorPodByUID map[types.UID]*v1.Pod // Pods indexed by full name for easy access. - podByFullName map[string]*api.Pod - mirrorPodByFullName map[string]*api.Pod + podByFullName map[string]*v1.Pod + mirrorPodByFullName map[string]*v1.Pod // Mirror pod UID to pod UID map. translationByUID map[types.UID]types.UID @@ -125,24 +125,24 @@ func NewBasicPodManager(client MirrorClient) Manager { } // Set the internal pods based on the new pods. -func (pm *basicManager) SetPods(newPods []*api.Pod) { +func (pm *basicManager) SetPods(newPods []*v1.Pod) { pm.lock.Lock() defer pm.lock.Unlock() - pm.podByUID = make(map[types.UID]*api.Pod) - pm.podByFullName = make(map[string]*api.Pod) - pm.mirrorPodByUID = make(map[types.UID]*api.Pod) - pm.mirrorPodByFullName = make(map[string]*api.Pod) + pm.podByUID = make(map[types.UID]*v1.Pod) + pm.podByFullName = make(map[string]*v1.Pod) + pm.mirrorPodByUID = make(map[types.UID]*v1.Pod) + pm.mirrorPodByFullName = make(map[string]*v1.Pod) pm.translationByUID = make(map[types.UID]types.UID) pm.updatePodsInternal(newPods...) } -func (pm *basicManager) AddPod(pod *api.Pod) { +func (pm *basicManager) AddPod(pod *v1.Pod) { pm.UpdatePod(pod) } -func (pm *basicManager) UpdatePod(pod *api.Pod) { +func (pm *basicManager) UpdatePod(pod *v1.Pod) { pm.lock.Lock() defer pm.lock.Unlock() pm.updatePodsInternal(pod) @@ -151,7 +151,7 @@ func (pm *basicManager) UpdatePod(pod *api.Pod) { // updatePodsInternal replaces the given pods in the current state of the // manager, updating the various indices. The caller is assumed to hold the // lock. 
-func (pm *basicManager) updatePodsInternal(pods ...*api.Pod) { +func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) { for _, pod := range pods { podFullName := kubecontainer.GetPodFullName(pod) if IsMirrorPod(pod) { @@ -170,7 +170,7 @@ func (pm *basicManager) updatePodsInternal(pods ...*api.Pod) { } } -func (pm *basicManager) DeletePod(pod *api.Pod) { +func (pm *basicManager) DeletePod(pod *v1.Pod) { pm.lock.Lock() defer pm.lock.Unlock() podFullName := kubecontainer.GetPodFullName(pod) @@ -184,13 +184,13 @@ func (pm *basicManager) DeletePod(pod *api.Pod) { } } -func (pm *basicManager) GetPods() []*api.Pod { +func (pm *basicManager) GetPods() []*v1.Pod { pm.lock.RLock() defer pm.lock.RUnlock() return podsMapToPods(pm.podByUID) } -func (pm *basicManager) GetPodsAndMirrorPods() ([]*api.Pod, []*api.Pod) { +func (pm *basicManager) GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) { pm.lock.RLock() defer pm.lock.RUnlock() pods := podsMapToPods(pm.podByUID) @@ -198,19 +198,19 @@ func (pm *basicManager) GetPodsAndMirrorPods() ([]*api.Pod, []*api.Pod) { return pods, mirrorPods } -func (pm *basicManager) GetPodByUID(uid types.UID) (*api.Pod, bool) { +func (pm *basicManager) GetPodByUID(uid types.UID) (*v1.Pod, bool) { pm.lock.RLock() defer pm.lock.RUnlock() pod, ok := pm.podByUID[uid] return pod, ok } -func (pm *basicManager) GetPodByName(namespace, name string) (*api.Pod, bool) { +func (pm *basicManager) GetPodByName(namespace, name string) (*v1.Pod, bool) { podFullName := kubecontainer.BuildPodFullName(name, namespace) return pm.GetPodByFullName(podFullName) } -func (pm *basicManager) GetPodByFullName(podFullName string) (*api.Pod, bool) { +func (pm *basicManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) { pm.lock.RLock() defer pm.lock.RUnlock() pod, ok := pm.podByFullName[podFullName] @@ -273,7 +273,7 @@ func (pm *basicManager) DeleteOrphanedMirrorPods() { } } -func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool { +func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool { // Check name and namespace first. 
+func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool {
if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace { return false @@ -285,22 +285,22 @@ func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool { return hash == getPodHash(pod) } -func podsMapToPods(UIDMap map[types.UID]*api.Pod) []*api.Pod { - pods := make([]*api.Pod, 0, len(UIDMap)) +func podsMapToPods(UIDMap map[types.UID]*v1.Pod) []*v1.Pod { + pods := make([]*v1.Pod, 0, len(UIDMap)) for _, pod := range UIDMap { pods = append(pods, pod) } return pods } -func (pm *basicManager) GetMirrorPodByPod(pod *api.Pod) (*api.Pod, bool) { +func (pm *basicManager) GetMirrorPodByPod(pod *v1.Pod) (*v1.Pod, bool) { pm.lock.RLock() defer pm.lock.RUnlock() mirrorPod, ok := pm.mirrorPodByFullName[kubecontainer.GetPodFullName(pod)] return mirrorPod, ok } -func (pm *basicManager) GetPodByMirrorPod(mirrorPod *api.Pod) (*api.Pod, bool) { +func (pm *basicManager) GetPodByMirrorPod(mirrorPod *v1.Pod) (*v1.Pod, bool) { pm.lock.RLock() defer pm.lock.RUnlock() pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)] diff --git a/pkg/kubelet/pod/pod_manager_test.go b/pkg/kubelet/pod/pod_manager_test.go index 1d3e61b303c..594a4de11e5 100644 --- a/pkg/kubelet/pod/pod_manager_test.go +++ b/pkg/kubelet/pod/pod_manager_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -35,8 +35,8 @@ func newTestManager() (*basicManager, *podtest.FakeMirrorClient) { // Tests that pods/maps are properly set after the pod update, and the basic // methods work correctly. func TestGetSetPods(t *testing.T) { - mirrorPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + mirrorPod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "987654321", Name: "bar", Namespace: "default", @@ -46,8 +46,8 @@ func TestGetSetPods(t *testing.T) { }, }, } - staticPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + staticPod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "123456789", Name: "bar", Namespace: "default", @@ -55,9 +55,9 @@ func TestGetSetPods(t *testing.T) { }, } - expectedPods := []*api.Pod{ + expectedPods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "999999999", Name: "taco", Namespace: "default", diff --git a/pkg/kubelet/pod/testing/fake_mirror_client.go b/pkg/kubelet/pod/testing/fake_mirror_client.go index 72563485a39..0e9ff513db2 100644 --- a/pkg/kubelet/pod/testing/fake_mirror_client.go +++ b/pkg/kubelet/pod/testing/fake_mirror_client.go @@ -19,7 +19,7 @@ package testing import ( "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/sets" ) @@ -41,7 +41,7 @@ func NewFakeMirrorClient() *FakeMirrorClient { return &m } -func (fmc *FakeMirrorClient) CreateMirrorPod(pod *api.Pod) error { +func (fmc *FakeMirrorClient) CreateMirrorPod(pod *v1.Pod) error { fmc.mirrorPodLock.Lock() defer fmc.mirrorPodLock.Unlock() podFullName := kubecontainer.GetPodFullName(pod) diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index b6c81b4a7c7..00809ae11ea 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -22,7 +22,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -39,7 +39,7 @@ import ( type 
OnCompleteFunc func(err error) // PodStatusFunc is a function that is invoked to generate a pod status. -type PodStatusFunc func(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus +type PodStatusFunc func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus // KillPodOptions are options when performing a pod update whose update type is kill. type KillPodOptions struct { @@ -52,9 +52,9 @@ type KillPodOptions struct { // UpdatePodOptions is an options struct to pass to a UpdatePod operation. type UpdatePodOptions struct { // pod to update - Pod *api.Pod + Pod *v1.Pod // the mirror pod for the pod to update, if it is a static pod - MirrorPod *api.Pod + MirrorPod *v1.Pod // the type of update (create, update, sync, kill) UpdateType kubetypes.SyncPodType // optional callback function when operation completes @@ -77,9 +77,9 @@ type PodWorkers interface { // syncPodOptions provides the arguments to a SyncPod operation. type syncPodOptions struct { // the mirror pod for the pod to sync, if it is a static pod - mirrorPod *api.Pod + mirrorPod *v1.Pod // pod to sync - pod *api.Pod + pod *v1.Pod // the type of update (create, update, sync) updateType kubetypes.SyncPodType // the current status @@ -182,7 +182,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { } if err != nil { glog.Errorf("Error syncing pod %s, skipping: %v", update.Pod.UID, err) - p.recorder.Eventf(update.Pod, api.EventTypeWarning, events.FailedSync, "Error syncing pod, skipping: %v", err) + p.recorder.Eventf(update.Pod, v1.EventTypeWarning, events.FailedSync, "Error syncing pod, skipping: %v", err) } p.wrapUp(update.Pod.UID, err) } @@ -283,7 +283,7 @@ func (p *podWorkers) checkForUpdates(uid types.UID) { // killPodNow returns a KillPodFunc that can be used to kill a pod. // It is intended to be injected into other modules that need to kill a pod. 
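Reviewer note: killPodNow, whose body follows, wraps the asynchronous pod workers in a synchronous call: it dispatches an UpdatePodOptions with an OnCompleteFunc that writes to a channel, then selects on that channel against a timer and records a warning event if the runtime exceeds the grace period. A minimal standalone sketch of that dispatch-and-wait pattern; the names and the timeout slack are illustrative, and the real timeout computation is elided from this diff:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type killRequest struct {
	podName     string
	gracePeriod int64
	// onComplete is invoked by the worker when the kill attempt finishes.
	onComplete func(err error)
}

// worker consumes kill requests and reports completion via the callback.
func worker(requests <-chan killRequest) {
	for req := range requests {
		// Pretend the runtime takes a little while to stop the containers.
		time.Sleep(50 * time.Millisecond)
		req.onComplete(nil)
	}
}

// killPodNow dispatches a kill request and waits for completion, but only
// for a bounded time derived from the grace period.
func killPodNow(requests chan<- killRequest, podName string, gracePeriod int64) error {
	done := make(chan error, 1)
	requests <- killRequest{
		podName:     podName,
		gracePeriod: gracePeriod,
		onComplete:  func(err error) { done <- err },
	}
	// Allow some slack beyond the grace period (illustrative value).
	timeout := time.Duration(gracePeriod)*time.Second + 2*time.Second
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		// Here the kubelet also records an ExceededGracePeriod event.
		return errors.New("timeout waiting to kill pod")
	}
}

func main() {
	requests := make(chan killRequest)
	go worker(requests)
	fmt.Println(killPodNow(requests, "test-pod", 0)) // prints <nil>
}
```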
func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.KillPodFunc { - return func(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error { + return func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error { // determine the grace period to use when killing the pod gracePeriod := int64(0) if gracePeriodOverride != nil { @@ -313,7 +313,7 @@ func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.K ch <- response{err: err} }, KillPodOptions: &KillPodOptions{ - PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { + PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { return status }, PodTerminationGracePeriodSecondsOverride: gracePeriodOverride, @@ -325,7 +325,7 @@ func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.K case r := <-ch: return r.err case <-time.After(timeoutDuration): - recorder.Eventf(pod, api.EventTypeWarning, events.ExceededGracePeriod, "Container runtime did not kill the pod within specified grace period.") + recorder.Eventf(pod, v1.EventTypeWarning, events.ExceededGracePeriod, "Container runtime did not kill the pod within specified grace period.") return fmt.Errorf("timeout waiting to kill pod") } } diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go index 3231776aa45..a6946528b65 100644 --- a/pkg/kubelet/pod_workers_test.go +++ b/pkg/kubelet/pod_workers_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -64,9 +64,9 @@ type TestingInterface interface { Errorf(format string, args ...interface{}) } -func newPod(uid, name string) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func newPod(uid, name string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: types.UID(uid), Name: name, }, @@ -239,8 +239,8 @@ func TestForgetNonExistingPodWorkers(t *testing.T) { } type simpleFakeKubelet struct { - pod *api.Pod - mirrorPod *api.Pod + pod *v1.Pod + mirrorPod *v1.Pod podStatus *kubecontainer.PodStatus wg sync.WaitGroup } @@ -283,12 +283,12 @@ func TestFakePodWorkers(t *testing.T) { fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t} tests := []struct { - pod *api.Pod - mirrorPod *api.Pod + pod *v1.Pod + mirrorPod *v1.Pod }{ { - &api.Pod{}, - &api.Pod{}, + &v1.Pod{}, + &v1.Pod{}, }, { podWithUidNameNs("12345678", "foo", "new"), @@ -336,7 +336,7 @@ func TestKillPodNowFunc(t *testing.T) { killPodFunc := killPodNow(podWorkers, fakeRecorder) pod := newPod("test", "test") gracePeriodOverride := int64(0) - err := killPodFunc(pod, api.PodStatus{Phase: api.PodFailed, Reason: "reason", Message: "message"}, &gracePeriodOverride) + err := killPodFunc(pod, v1.PodStatus{Phase: v1.PodFailed, Reason: "reason", Message: "message"}, &gracePeriodOverride) if err != nil { t.Errorf("Unexpected error: %v", err) } diff --git a/pkg/kubelet/prober/common_test.go b/pkg/kubelet/prober/common_test.go index 71f43f98a25..b4ea6f0d45c 100644 --- a/pkg/kubelet/prober/common_test.go +++ b/pkg/kubelet/prober/common_test.go @@ -20,9 +20,9 @@ import ( "reflect" "sync" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/api/v1" + 
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" @@ -39,27 +39,27 @@ const ( var testContainerID = kubecontainer.ContainerID{Type: "test", ID: "cOnTaInEr_Id"} -func getTestRunningStatus() api.PodStatus { - containerStatus := api.ContainerStatus{ +func getTestRunningStatus() v1.PodStatus { + containerStatus := v1.ContainerStatus{ Name: testContainerName, ContainerID: testContainerID.String(), } - containerStatus.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.Now()} - podStatus := api.PodStatus{ - Phase: api.PodRunning, - ContainerStatuses: []api.ContainerStatus{containerStatus}, + containerStatus.State.Running = &v1.ContainerStateRunning{StartedAt: unversioned.Now()} + podStatus := v1.PodStatus{ + Phase: v1.PodRunning, + ContainerStatuses: []v1.ContainerStatus{containerStatus}, } return podStatus } -func getTestPod() *api.Pod { - container := api.Container{ +func getTestPod() *v1.Pod { + container := v1.Container{ Name: testContainerName, } - pod := api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{container}, - RestartPolicy: api.RestartPolicyNever, + pod := v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{container}, + RestartPolicy: v1.RestartPolicyNever, }, } pod.Name = "testPod" @@ -67,10 +67,10 @@ func getTestPod() *api.Pod { return &pod } -func setTestProbe(pod *api.Pod, probeType probeType, probeSpec api.Probe) { +func setTestProbe(pod *v1.Pod, probeType probeType, probeSpec v1.Probe) { // All tests rely on the fake exec prober. - probeSpec.Handler = api.Handler{ - Exec: &api.ExecAction{}, + probeSpec.Handler = v1.Handler{ + Exec: &v1.ExecAction{}, } // Apply test defaults, overwridden for test speed. @@ -97,7 +97,7 @@ func setTestProbe(pod *api.Pod, probeType probeType, probeSpec api.Probe) { func newTestManager() *manager { refManager := kubecontainer.NewRefManager() - refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings. + refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings. podManager := kubepod.NewBasicPodManager(nil) // Add test pod to pod manager, so that status manager can get the pod from pod manager if needed. podManager.AddPod(getTestPod()) @@ -113,7 +113,7 @@ func newTestManager() *manager { return m } -func newTestWorker(m *manager, probeType probeType, probeSpec api.Probe) *worker { +func newTestWorker(m *manager, probeType probeType, probeSpec v1.Probe) *worker { pod := getTestPod() setTestProbe(pod, probeType, probeSpec) return newWorker(m, probeType, pod, pod.Spec.Containers[0]) diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 248916062c5..941520cd309 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -73,8 +73,8 @@ func newProber( } // probe probes the container. 
-func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (results.Result, error) { - var probeSpec *api.Probe +func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) { + var probeSpec *v1.Probe switch probeType { case readiness: probeSpec = container.ReadinessProbe @@ -100,12 +100,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, if err != nil { glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) if hasRef { - pb.recorder.Eventf(ref, api.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) + pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) } } else { // result != probe.Success glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) if hasRef { - pb.recorder.Eventf(ref, api.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) + pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) } } return results.Failure, err @@ -116,7 +116,7 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. -func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { +func (pb *prober) runProbeWithRetries(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { var err error var result probe.Result var output string @@ -131,7 +131,7 @@ func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Pod // buildHeaderMap takes a list of HTTPHeader string // pairs and returns a populated string->[]string http.Header map. 
-func buildHeader(headerList []api.HTTPHeader) http.Header { +func buildHeader(headerList []v1.HTTPHeader) http.Header { headers := make(http.Header) for _, header := range headerList { headers[header.Name] = append(headers[header.Name], header.Value) @@ -139,7 +139,7 @@ func buildHeader(headerList []api.HTTPHeader) http.Header { return headers } -func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { +func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) @@ -174,7 +174,7 @@ func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, con return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name) } -func extractPort(param intstr.IntOrString, container api.Container) (int, error) { +func extractPort(param intstr.IntOrString, container v1.Container) (int, error) { port := -1 var err error switch param.Type { @@ -197,7 +197,7 @@ func extractPort(param intstr.IntOrString, container api.Container) (int, error) } // findPortByName is a helper function to look up a port in a container by name. -func findPortByName(container api.Container, portName string) (int, error) { +func findPortByName(container v1.Container, portName string) (int, error) { for _, port := range container.Ports { if port.Name == portName { return int(port.ContainerPort), nil @@ -226,7 +226,7 @@ type execInContainer struct { run func() ([]byte, error) } -func (pb *prober) newExecInContainer(container api.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { +func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { return execInContainer{func() ([]byte, error) { return pb.runner.RunInContainer(containerID, cmd, timeout) }} diff --git a/pkg/kubelet/prober/prober_manager.go b/pkg/kubelet/prober/prober_manager.go index 32f05fd7070..09f942b025d 100644 --- a/pkg/kubelet/prober/prober_manager.go +++ b/pkg/kubelet/prober/prober_manager.go @@ -20,7 +20,7 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" @@ -39,19 +39,19 @@ import ( type Manager interface { // AddPod creates new probe workers for every container probe. This should be called for every // pod created. - AddPod(pod *api.Pod) + AddPod(pod *v1.Pod) // RemovePod handles cleaning up the removed pod state, including terminating probe workers and // deleting cached results. - RemovePod(pod *api.Pod) + RemovePod(pod *v1.Pod) // CleanupPods handles cleaning up pods which should no longer be running. // It takes a list of "active pods" which should not be cleaned up. - CleanupPods(activePods []*api.Pod) + CleanupPods(activePods []*v1.Pod) // UpdatePodStatus modifies the given PodStatus with the appropriate Ready state for each // container based on container running status, cached probe results and worker states. 
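Reviewer note: CleanupPods, declared a few lines up and implemented later in this hunk, reconciles the running probe workers against the list of active pods: it builds a set of the active UIDs (desiredPods) and stops every worker whose pod is no longer in it. A standalone sketch of that reconcile-against-a-desired-set idea; workerKey and the stop callbacks are invented for the example:

```go
package main

import "fmt"

// workerKey is a stand-in for the prober's worker key: pod UID plus container.
type workerKey struct {
	podUID    string
	container string
}

// cleanupWorkers stops and removes every worker whose pod UID is not in the
// list of active pods, similar in spirit to the prober manager's CleanupPods.
func cleanupWorkers(workers map[workerKey]func(), activePodUIDs []string) {
	desired := make(map[string]struct{}, len(activePodUIDs))
	for _, uid := range activePodUIDs {
		desired[uid] = struct{}{}
	}
	for key, stop := range workers {
		if _, ok := desired[key.podUID]; !ok {
			stop() // in the kubelet this asks the worker goroutine to exit
			delete(workers, key)
		}
	}
}

func main() {
	stopped := []string{}
	workers := map[workerKey]func(){
		{podUID: "keep", container: "c1"}:    func() { stopped = append(stopped, "keep/c1") },
		{podUID: "cleanup", container: "c1"}: func() { stopped = append(stopped, "cleanup/c1") },
	}
	cleanupWorkers(workers, []string{"keep"})
	fmt.Println(stopped, len(workers)) // [cleanup/c1] 1
}
```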
- UpdatePodStatus(types.UID, *api.PodStatus) + UpdatePodStatus(types.UID, *v1.PodStatus) // Start starts the Manager sync loops. Start() @@ -127,7 +127,7 @@ func (t probeType) String() string { } } -func (m *manager) AddPod(pod *api.Pod) { +func (m *manager) AddPod(pod *v1.Pod) { m.workerLock.Lock() defer m.workerLock.Unlock() @@ -161,7 +161,7 @@ func (m *manager) AddPod(pod *api.Pod) { } } -func (m *manager) RemovePod(pod *api.Pod) { +func (m *manager) RemovePod(pod *v1.Pod) { m.workerLock.RLock() defer m.workerLock.RUnlock() @@ -177,7 +177,7 @@ func (m *manager) RemovePod(pod *api.Pod) { } } -func (m *manager) CleanupPods(activePods []*api.Pod) { +func (m *manager) CleanupPods(activePods []*v1.Pod) { desiredPods := make(map[types.UID]sets.Empty) for _, pod := range activePods { desiredPods[pod.UID] = sets.Empty{} @@ -193,7 +193,7 @@ func (m *manager) CleanupPods(activePods []*api.Pod) { } } -func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) { +func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) { for i, c := range podStatus.ContainerStatuses { var ready bool if c.State.Running == nil { diff --git a/pkg/kubelet/prober/prober_manager_test.go b/pkg/kubelet/prober/prober_manager_test.go index 7585ba3898e..244f184c2ca 100644 --- a/pkg/kubelet/prober/prober_manager_test.go +++ b/pkg/kubelet/prober/prober_manager_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/probe" @@ -36,9 +36,9 @@ func init() { runtime.ReallyCrash = true } -var defaultProbe *api.Probe = &api.Probe{ - Handler: api.Handler{ - Exec: &api.ExecAction{}, +var defaultProbe *v1.Probe = &v1.Probe{ + Handler: v1.Handler{ + Exec: &v1.ExecAction{}, }, TimeoutSeconds: 1, PeriodSeconds: 1, @@ -47,12 +47,12 @@ var defaultProbe *api.Probe = &api.Probe{ } func TestAddRemovePods(t *testing.T) { - noProbePod := api.Pod{ - ObjectMeta: api.ObjectMeta{ + noProbePod := v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "no_probe_pod", }, - Spec: api.PodSpec{ - Containers: []api.Container{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ Name: "no_probe1", }, { Name: "no_probe2", @@ -60,12 +60,12 @@ func TestAddRemovePods(t *testing.T) { }, } - probePod := api.Pod{ - ObjectMeta: api.ObjectMeta{ + probePod := v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "probe_pod", }, - Spec: api.PodSpec{ - Containers: []api.Container{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ Name: "no_probe1", }, { Name: "readiness", @@ -126,12 +126,12 @@ func TestAddRemovePods(t *testing.T) { func TestCleanupPods(t *testing.T) { m := newTestManager() defer cleanup(t, m) - podToCleanup := api.Pod{ - ObjectMeta: api.ObjectMeta{ + podToCleanup := v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "pod_cleanup", }, - Spec: api.PodSpec{ - Containers: []api.Container{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ Name: "prober1", ReadinessProbe: defaultProbe, }, { @@ -140,12 +140,12 @@ func TestCleanupPods(t *testing.T) { }}, }, } - podToKeep := api.Pod{ - ObjectMeta: api.ObjectMeta{ + podToKeep := v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "pod_keep", }, - Spec: api.PodSpec{ - Containers: []api.Container{{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ Name: "prober1", ReadinessProbe: defaultProbe, }, { @@ -157,7 +157,7 @@ func TestCleanupPods(t *testing.T) { m.AddPod(&podToCleanup) m.AddPod(&podToKeep) - 
m.CleanupPods([]*api.Pod{&podToKeep}) + m.CleanupPods([]*v1.Pod{&podToKeep}) removedProbes := []probeKey{ {"pod_cleanup", "prober1", readiness}, @@ -178,9 +178,9 @@ func TestCleanupPods(t *testing.T) { func TestCleanupRepeated(t *testing.T) { m := newTestManager() defer cleanup(t, m) - podTemplate := api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{{ + podTemplate := v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ Name: "prober1", ReadinessProbe: defaultProbe, LivenessProbe: defaultProbe, @@ -196,49 +196,49 @@ func TestCleanupRepeated(t *testing.T) { } for i := 0; i < 10; i++ { - m.CleanupPods([]*api.Pod{}) + m.CleanupPods([]*v1.Pod{}) } } func TestUpdatePodStatus(t *testing.T) { - unprobed := api.ContainerStatus{ + unprobed := v1.ContainerStatus{ Name: "unprobed_container", ContainerID: "test://unprobed_container_id", - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, } - probedReady := api.ContainerStatus{ + probedReady := v1.ContainerStatus{ Name: "probed_container_ready", ContainerID: "test://probed_container_ready_id", - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, } - probedPending := api.ContainerStatus{ + probedPending := v1.ContainerStatus{ Name: "probed_container_pending", ContainerID: "test://probed_container_pending_id", - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, } - probedUnready := api.ContainerStatus{ + probedUnready := v1.ContainerStatus{ Name: "probed_container_unready", ContainerID: "test://probed_container_unready_id", - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, }, } - terminated := api.ContainerStatus{ + terminated := v1.ContainerStatus{ Name: "terminated_container", ContainerID: "test://terminated_container_id", - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, }, } - podStatus := api.PodStatus{ - Phase: api.PodRunning, - ContainerStatuses: []api.ContainerStatus{ + podStatus := v1.PodStatus{ + Phase: v1.PodRunning, + ContainerStatuses: []v1.ContainerStatus{ unprobed, probedReady, probedPending, probedUnready, terminated, }, } @@ -254,9 +254,9 @@ func TestUpdatePodStatus(t *testing.T) { probeKey{testPodUID, probedUnready.Name, readiness}: {}, probeKey{testPodUID, terminated.Name, readiness}: {}, } - m.readinessManager.Set(kubecontainer.ParseContainerID(probedReady.ContainerID), results.Success, &api.Pod{}) - m.readinessManager.Set(kubecontainer.ParseContainerID(probedUnready.ContainerID), results.Failure, &api.Pod{}) - m.readinessManager.Set(kubecontainer.ParseContainerID(terminated.ContainerID), results.Success, &api.Pod{}) + m.readinessManager.Set(kubecontainer.ParseContainerID(probedReady.ContainerID), results.Success, &v1.Pod{}) + m.readinessManager.Set(kubecontainer.ParseContainerID(probedUnready.ContainerID), results.Failure, &v1.Pod{}) + m.readinessManager.Set(kubecontainer.ParseContainerID(terminated.ContainerID), results.Success, &v1.Pod{}) m.UpdatePodStatus(testPodUID, &podStatus) @@ -281,7 +281,7 @@ func TestUpdatePodStatus(t *testing.T) { func TestUpdateReadiness(t *testing.T) { testPod := getTestPod() - setTestProbe(testPod, readiness, api.Probe{}) + 
setTestProbe(testPod, readiness, v1.Probe{}) m := newTestManager() defer cleanup(t, m) @@ -291,7 +291,7 @@ func TestUpdateReadiness(t *testing.T) { defer func() { close(stopCh) // Send an update to exit updateReadiness() - m.readinessManager.Set(kubecontainer.ContainerID{}, results.Success, &api.Pod{}) + m.readinessManager.Set(kubecontainer.ContainerID{}, results.Success, &v1.Pod{}) }() exec := syncExecProber{} diff --git a/pkg/kubelet/prober/prober_test.go b/pkg/kubelet/prober/prober_test.go index 0c529d6c771..5cb6eea6e04 100644 --- a/pkg/kubelet/prober/prober_test.go +++ b/pkg/kubelet/prober/prober_test.go @@ -23,7 +23,7 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -54,8 +54,8 @@ func TestFormatURL(t *testing.T) { } func TestFindPortByName(t *testing.T) { - container := api.Container{ - Ports: []api.ContainerPort{ + container := v1.Container{ + Ports: []v1.ContainerPort{ { Name: "foo", ContainerPort: 8080, @@ -75,28 +75,28 @@ func TestFindPortByName(t *testing.T) { func TestGetURLParts(t *testing.T) { testCases := []struct { - probe *api.HTTPGetAction + probe *v1.HTTPGetAction ok bool host string port int path string }{ - {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(-1), Path: ""}, false, "", -1, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromString(""), Path: ""}, false, "", -1, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromString("-1"), Path: ""}, false, "", -1, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromString("not-found"), Path: ""}, false, "", -1, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromString("found"), Path: ""}, true, "127.0.0.1", 93, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(76), Path: ""}, true, "127.0.0.1", 76, ""}, - {&api.HTTPGetAction{Host: "", Port: intstr.FromString("118"), Path: ""}, true, "127.0.0.1", 118, ""}, - {&api.HTTPGetAction{Host: "hostname", Port: intstr.FromInt(76), Path: "path"}, true, "hostname", 76, "path"}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromInt(-1), Path: ""}, false, "", -1, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromString(""), Path: ""}, false, "", -1, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromString("-1"), Path: ""}, false, "", -1, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromString("not-found"), Path: ""}, false, "", -1, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromString("found"), Path: ""}, true, "127.0.0.1", 93, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromInt(76), Path: ""}, true, "127.0.0.1", 76, ""}, + {&v1.HTTPGetAction{Host: "", Port: intstr.FromString("118"), Path: ""}, true, "127.0.0.1", 118, ""}, + {&v1.HTTPGetAction{Host: "hostname", Port: intstr.FromInt(76), Path: "path"}, true, "hostname", 76, "path"}, } for _, test := range testCases { - state := api.PodStatus{PodIP: "127.0.0.1"} - container := api.Container{ - Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}}, - LivenessProbe: &api.Probe{ - Handler: api.Handler{ + state := v1.PodStatus{PodIP: "127.0.0.1"} + container := v1.Container{ + Ports: []v1.ContainerPort{{Name: "found", ContainerPort: 93}}, + LivenessProbe: &v1.Probe{ + Handler: v1.Handler{ HTTPGet: test.probe, }, }, @@ -104,7 +104,7 @@ func TestGetURLParts(t *testing.T) { scheme := test.probe.Scheme if scheme == "" { - scheme = api.URISchemeHTTP + scheme = v1.URISchemeHTTP } host := 
test.probe.Host if host == "" { @@ -130,26 +130,26 @@ func TestGetURLParts(t *testing.T) { func TestGetTCPAddrParts(t *testing.T) { testCases := []struct { - probe *api.TCPSocketAction + probe *v1.TCPSocketAction ok bool host string port int }{ - {&api.TCPSocketAction{Port: intstr.FromInt(-1)}, false, "", -1}, - {&api.TCPSocketAction{Port: intstr.FromString("")}, false, "", -1}, - {&api.TCPSocketAction{Port: intstr.FromString("-1")}, false, "", -1}, - {&api.TCPSocketAction{Port: intstr.FromString("not-found")}, false, "", -1}, - {&api.TCPSocketAction{Port: intstr.FromString("found")}, true, "1.2.3.4", 93}, - {&api.TCPSocketAction{Port: intstr.FromInt(76)}, true, "1.2.3.4", 76}, - {&api.TCPSocketAction{Port: intstr.FromString("118")}, true, "1.2.3.4", 118}, + {&v1.TCPSocketAction{Port: intstr.FromInt(-1)}, false, "", -1}, + {&v1.TCPSocketAction{Port: intstr.FromString("")}, false, "", -1}, + {&v1.TCPSocketAction{Port: intstr.FromString("-1")}, false, "", -1}, + {&v1.TCPSocketAction{Port: intstr.FromString("not-found")}, false, "", -1}, + {&v1.TCPSocketAction{Port: intstr.FromString("found")}, true, "1.2.3.4", 93}, + {&v1.TCPSocketAction{Port: intstr.FromInt(76)}, true, "1.2.3.4", 76}, + {&v1.TCPSocketAction{Port: intstr.FromString("118")}, true, "1.2.3.4", 118}, } for _, test := range testCases { host := "1.2.3.4" - container := api.Container{ - Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}}, - LivenessProbe: &api.Probe{ - Handler: api.Handler{ + container := v1.Container{ + Ports: []v1.ContainerPort{{Name: "found", ContainerPort: 93}}, + LivenessProbe: &v1.Probe{ + Handler: v1.Handler{ TCPSocket: test.probe, }, }, @@ -171,19 +171,19 @@ func TestGetTCPAddrParts(t *testing.T) { func TestHTTPHeaders(t *testing.T) { testCases := []struct { - input []api.HTTPHeader + input []v1.HTTPHeader output http.Header }{ - {[]api.HTTPHeader{}, http.Header{}}, - {[]api.HTTPHeader{ + {[]v1.HTTPHeader{}, http.Header{}}, + {[]v1.HTTPHeader{ {Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"}, }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}}}, - {[]api.HTTPHeader{ + {[]v1.HTTPHeader{ {Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"}, {Name: "X-Muffins-Or-Plumcakes", Value: "Muffins!"}, }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}, "X-Muffins-Or-Plumcakes": {"Muffins!"}}}, - {[]api.HTTPHeader{ + {[]v1.HTTPHeader{ {Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"}, {Name: "X-Muffins-Or-Cupcakes", Value: "Cupcakes, too"}, }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins", "Cupcakes, too"}}}, @@ -203,13 +203,13 @@ func TestProbe(t *testing.T) { } containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"} - execProbe := &api.Probe{ - Handler: api.Handler{ - Exec: &api.ExecAction{}, + execProbe := &v1.Probe{ + Handler: v1.Handler{ + Exec: &v1.ExecAction{}, }, } tests := []struct { - probe *api.Probe + probe *v1.Probe execError bool expectError bool execResult probe.Result @@ -220,7 +220,7 @@ func TestProbe(t *testing.T) { expectedResult: results.Success, }, { // No handler - probe: &api.Probe{}, + probe: &v1.Probe{}, expectError: true, expectedResult: results.Failure, }, @@ -251,7 +251,7 @@ func TestProbe(t *testing.T) { for i, test := range tests { for _, probeType := range [...]probeType{liveness, readiness} { testID := fmt.Sprintf("%d-%s", i, probeType) - testContainer := api.Container{} + testContainer := v1.Container{} switch probeType { case liveness: testContainer.LivenessProbe = test.probe @@ -264,7 +264,7 @@ func TestProbe(t *testing.T) { prober.exec = 
fakeExecProber{test.execResult, nil} } - result, err := prober.probe(probeType, &api.Pod{}, api.PodStatus{}, testContainer, containerID) + result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) if test.expectError && err == nil { t.Errorf("[%s] Expected probe error but no error was returned.", testID) } @@ -302,7 +302,7 @@ func TestNewExecInContainer(t *testing.T) { runner: runner, } - container := api.Container{} + container := v1.Container{} containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"} cmd := []string{"/foo", "bar"} exec := prober.newExecInContainer(container, containerID, cmd, 0) diff --git a/pkg/kubelet/prober/results/results_manager.go b/pkg/kubelet/prober/results/results_manager.go index 28703c6c6bf..b7076a0bfea 100644 --- a/pkg/kubelet/prober/results/results_manager.go +++ b/pkg/kubelet/prober/results/results_manager.go @@ -19,7 +19,7 @@ package results import ( "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" ) @@ -30,7 +30,7 @@ type Manager interface { Get(kubecontainer.ContainerID) (Result, bool) // Set sets the cached result for the container with the given ID. // The pod is only included to be sent with the update. - Set(kubecontainer.ContainerID, Result, *api.Pod) + Set(kubecontainer.ContainerID, Result, *v1.Pod) // Remove clears the cached result for the container with the given ID. Remove(kubecontainer.ContainerID) // Updates creates a channel that receives an Update whenever its result changes (but not @@ -92,7 +92,7 @@ func (m *manager) Get(id kubecontainer.ContainerID) (Result, bool) { return result, found } -func (m *manager) Set(id kubecontainer.ContainerID, result Result, pod *api.Pod) { +func (m *manager) Set(id kubecontainer.ContainerID, result Result, pod *v1.Pod) { if m.setInternal(id, result) { m.updates <- Update{id, result, pod.UID} } diff --git a/pkg/kubelet/prober/results/results_manager_test.go b/pkg/kubelet/prober/results/results_manager_test.go index 99fb0064ad9..85ae8a3ce8b 100644 --- a/pkg/kubelet/prober/results/results_manager_test.go +++ b/pkg/kubelet/prober/results/results_manager_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/wait" ) @@ -35,7 +35,7 @@ func TestCacheOperations(t *testing.T) { _, found := m.Get(unsetID) assert.False(t, found, "unset result found") - m.Set(setID, Success, &api.Pod{}) + m.Set(setID, Success, &v1.Pod{}) result, found := m.Get(setID) assert.True(t, result == Success, "set result") assert.True(t, found, "set result found") @@ -48,7 +48,7 @@ func TestCacheOperations(t *testing.T) { func TestUpdates(t *testing.T) { m := NewManager() - pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "test-pod"}} + pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "test-pod"}} fooID := kubecontainer.ContainerID{Type: "test", ID: "foo"} barID := kubecontainer.ContainerID{Type: "test", ID: "bar"} diff --git a/pkg/kubelet/prober/testing/fake_manager.go b/pkg/kubelet/prober/testing/fake_manager.go index 3fc4212507c..f989cccdcdf 100644 --- a/pkg/kubelet/prober/testing/fake_manager.go +++ b/pkg/kubelet/prober/testing/fake_manager.go @@ -17,19 +17,19 @@ limitations under the License. 
package testing import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" ) type FakeManager struct{} // Unused methods. -func (_ FakeManager) AddPod(_ *api.Pod) {} -func (_ FakeManager) RemovePod(_ *api.Pod) {} -func (_ FakeManager) CleanupPods(_ []*api.Pod) {} -func (_ FakeManager) Start() {} +func (_ FakeManager) AddPod(_ *v1.Pod) {} +func (_ FakeManager) RemovePod(_ *v1.Pod) {} +func (_ FakeManager) CleanupPods(_ []*v1.Pod) {} +func (_ FakeManager) Start() {} -func (_ FakeManager) UpdatePodStatus(_ types.UID, podStatus *api.PodStatus) { +func (_ FakeManager) UpdatePodStatus(_ types.UID, podStatus *v1.PodStatus) { for i := range podStatus.ContainerStatuses { podStatus.ContainerStatuses[i].Ready = true } diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go index 1bb2e57353b..889c8733f05 100644 --- a/pkg/kubelet/prober/worker.go +++ b/pkg/kubelet/prober/worker.go @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -37,13 +37,13 @@ type worker struct { stopCh chan struct{} // The pod containing this probe (read-only) - pod *api.Pod + pod *v1.Pod // The container to probe (read-only) - container api.Container + container v1.Container // Describes the probe configuration (read-only) - spec *api.Probe + spec *v1.Probe // The type of the worker. probeType probeType @@ -70,8 +70,8 @@ type worker struct { func newWorker( m *manager, probeType probeType, - pod *api.Pod, - container api.Container) *worker { + pod *v1.Pod, + container v1.Container) *worker { w := &worker{ stopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking. @@ -149,13 +149,13 @@ func (w *worker) doProbe() (keepGoing bool) { } // Worker should terminate if pod is terminated. - if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { + if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded { glog.V(3).Infof("Pod %v %v, exiting probe worker", format.Pod(w.pod), status.Phase) return false } - c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name) + c, ok := v1.GetContainerStatus(status.ContainerStatuses, w.container.Name) if !ok || len(c.ContainerID) == 0 { // Either the container has not been created yet, or it was deleted. glog.V(3).Infof("Probe target container not found: %v - %v", @@ -186,7 +186,7 @@ func (w *worker) doProbe() (keepGoing bool) { } // Abort if the container will not be restarted. 
return c.State.Terminated == nil || - w.pod.Spec.RestartPolicy != api.RestartPolicyNever + w.pod.Spec.RestartPolicy != v1.RestartPolicyNever } if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds { diff --git a/pkg/kubelet/prober/worker_test.go b/pkg/kubelet/prober/worker_test.go index 95ea99213b5..516a75dde4a 100644 --- a/pkg/kubelet/prober/worker_test.go +++ b/pkg/kubelet/prober/worker_test.go @@ -21,9 +21,9 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" @@ -48,17 +48,17 @@ func TestDoProbe(t *testing.T) { pendingStatus.ContainerStatuses[0].State.Running = nil terminatedStatus := getTestRunningStatus() terminatedStatus.ContainerStatuses[0].State.Running = nil - terminatedStatus.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{ + terminatedStatus.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{ StartedAt: unversioned.Now(), } otherStatus := getTestRunningStatus() otherStatus.ContainerStatuses[0].Name = "otherContainer" failedStatus := getTestRunningStatus() - failedStatus.Phase = api.PodFailed + failedStatus.Phase = v1.PodFailed tests := []struct { - probe api.Probe - podStatus *api.PodStatus + probe v1.Probe + podStatus *v1.PodStatus expectContinue bool expectSet bool expectedResult results.Result @@ -90,7 +90,7 @@ func TestDoProbe(t *testing.T) { }, { // Initial delay passed podStatus: &runningStatus, - probe: api.Probe{ + probe: v1.Probe{ InitialDelaySeconds: -100, }, expectContinue: true, @@ -127,7 +127,7 @@ func TestInitialDelay(t *testing.T) { m := newTestManager() for _, probeType := range [...]probeType{liveness, readiness} { - w := newTestWorker(m, probeType, api.Probe{ + w := newTestWorker(m, probeType, v1.Probe{ InitialDelaySeconds: 10, }) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) @@ -149,7 +149,7 @@ func TestInitialDelay(t *testing.T) { func TestFailureThreshold(t *testing.T) { m := newTestManager() - w := newTestWorker(m, readiness, api.Probe{SuccessThreshold: 1, FailureThreshold: 3}) + w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) for i := 0; i < 2; i++ { @@ -183,11 +183,11 @@ func TestFailureThreshold(t *testing.T) { func TestSuccessThreshold(t *testing.T) { m := newTestManager() - w := newTestWorker(m, readiness, api.Probe{SuccessThreshold: 3, FailureThreshold: 1}) + w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) // Start out failure. - w.resultsManager.Set(testContainerID, results.Failure, &api.Pod{}) + w.resultsManager.Set(testContainerID, results.Failure, &v1.Pod{}) for i := 0; i < 2; i++ { // Probe defaults to Failure. 
@@ -220,7 +220,7 @@ func TestCleanUp(t *testing.T) { for _, probeType := range [...]probeType{liveness, readiness} { key := probeKey{testPodUID, testContainerName, probeType} - w := newTestWorker(m, probeType, api.Probe{}) + w := newTestWorker(m, probeType, v1.Probe{}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) go w.run() m.workers[key] = w @@ -256,7 +256,7 @@ func TestHandleCrash(t *testing.T) { runtime.ReallyCrash = false // Test that we *don't* really crash. m := newTestManager() - w := newTestWorker(m, readiness, api.Probe{}) + w := newTestWorker(m, readiness, v1.Probe{}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) expectContinue(t, w, w.doProbe(), "Initial successful probe.") @@ -308,7 +308,7 @@ func (p crashingExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) { func TestOnHoldOnLivenessCheckFailure(t *testing.T) { m := newTestManager() - w := newTestWorker(m, liveness, api.Probe{SuccessThreshold: 1, FailureThreshold: 1}) + w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1}) status := getTestRunningStatus() m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) diff --git a/pkg/kubelet/qos/policy.go b/pkg/kubelet/qos/policy.go index 7c142f5cd10..075bcc85439 100644 --- a/pkg/kubelet/qos/policy.go +++ b/pkg/kubelet/qos/policy.go @@ -16,9 +16,7 @@ limitations under the License. package qos -import ( - "k8s.io/kubernetes/pkg/api" -) +import "k8s.io/kubernetes/pkg/api/v1" const ( // PodInfraOOMAdj is very docker specific. For arbitrary runtime, it may not make @@ -39,7 +37,7 @@ const ( // multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 // and 1000. Containers with higher OOM scores are killed if the system runs out of memory. // See https://lwn.net/Articles/391222/ for more information. -func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int { +func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int { switch GetPodQOS(pod) { case Guaranteed: // Guaranteed containers should be the last to get killed. 
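Reviewer note: GetContainerOOMScoreAdjust in the policy.go hunk above maps the pod's QoS class to an oom_score_adj: guaranteed pods get a strongly negative value so they are killed last, best-effort pods get the highest value, and burstable pods are scaled by their memory request relative to the memoryCapacity parameter. The exact constants and clamping live in the elided part of policy.go; the sketch below only illustrates the burstable scaling idea, and its guard values are made up rather than authoritative:

```go
package main

import "fmt"

type qosClass string

const (
	guaranteed qosClass = "Guaranteed"
	burstable  qosClass = "Burstable"
	bestEffort qosClass = "BestEffort"
)

// oomScoreAdjust sketches the policy: the more memory a burstable container
// requests relative to node capacity, the lower (safer) its OOM score adjust.
// The -998/1000 endpoints mirror the general shape of the kubelet policy but
// are illustrative only.
func oomScoreAdjust(class qosClass, memoryRequest, memoryCapacity int64) int {
	switch class {
	case guaranteed:
		return -998 // killed last
	case bestEffort:
		return 1000 // killed first
	}
	// Burstable: scale linearly with the fraction of capacity requested,
	// then keep the result away from the guaranteed and best-effort extremes.
	adj := 1000 - int(1000*memoryRequest/memoryCapacity)
	if adj < 2 {
		adj = 2
	}
	if adj > 999 {
		adj = 999
	}
	return adj
}

func main() {
	capacity := int64(8 << 30) // 8 GiB node
	fmt.Println(oomScoreAdjust(burstable, 4<<30, capacity)) // 500
	fmt.Println(oomScoreAdjust(burstable, 1<<30, capacity)) // 875
}
```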
diff --git a/pkg/kubelet/qos/policy_test.go b/pkg/kubelet/qos/policy_test.go index 50c9b163497..ea35de56527 100644 --- a/pkg/kubelet/qos/policy_test.go +++ b/pkg/kubelet/qos/policy_test.go @@ -20,8 +20,8 @@ import ( "strconv" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" ) const ( @@ -29,13 +29,13 @@ const ( ) var ( - cpuLimit = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + cpuLimit = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Limits: api.ResourceList{ - api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"), }, }, }, @@ -43,16 +43,16 @@ var ( }, } - memoryLimitCPURequest = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + memoryLimitCPURequest = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceCPU): resource.MustParse("0"), + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("0"), }, - Limits: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"), }, }, }, @@ -60,13 +60,13 @@ var ( }, } - zeroMemoryLimit = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + zeroMemoryLimit = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Limits: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse("0"), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("0"), }, }, }, @@ -74,28 +74,28 @@ var ( }, } - noRequestLimit = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + noRequestLimit = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{}, + Resources: v1.ResourceRequirements{}, }, }, }, } - equalRequestLimitCPUMemory = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + equalRequestLimitCPUMemory = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), - api.ResourceName(api.ResourceCPU): resource.MustParse("5m"), + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"), + v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"), }, - Limits: api.ResourceList{ - api.ResourceName(api.ResourceCPU): resource.MustParse("5m"), - api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"), + v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"), }, }, }, @@ -103,17 +103,17 @@ var ( }, } - cpuUnlimitedMemoryLimitedWithRequests = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + cpuUnlimitedMemoryLimitedWithRequests = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount / 2)), - api.ResourceName(api.ResourceCPU): 
resource.MustParse("5m"), + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount / 2)), + v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"), }, - Limits: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + Limits: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"), }, }, }, @@ -121,14 +121,14 @@ var ( }, } - requestNoLimit = api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + requestNoLimit = v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ - api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)), - api.ResourceName(api.ResourceCPU): resource.MustParse("5m"), + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)), + v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"), }, }, }, @@ -138,7 +138,7 @@ var ( ) type oomTest struct { - pod *api.Pod + pod *v1.Pod memoryCapacity int64 lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned. highOOMScoreAdj int // The min oom_score_adj score the container should be assigned. diff --git a/pkg/kubelet/qos/qos.go b/pkg/kubelet/qos/qos.go index 00e347f9acd..f515ca30dc2 100644 --- a/pkg/kubelet/qos/qos.go +++ b/pkg/kubelet/qos/qos.go @@ -19,11 +19,12 @@ package qos import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/sets" ) // isResourceGuaranteed returns true if the container's resource requirements are Guaranteed. -func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { +func isResourceGuaranteed(container *v1.Container, resource v1.ResourceName) bool { // A container resource is guaranteed if its request == limit. // If request == limit, the user is very confident of resource consumption. req, hasReq := container.Resources.Requests[resource] @@ -35,7 +36,7 @@ func isResourceGuaranteed(container *api.Container, resource api.ResourceName) b } // isResourceBestEffort returns true if the container's resource requirements are best-effort. -func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool { +func isResourceBestEffort(container *v1.Container, resource v1.ResourceName) bool { // A container resource is best-effort if its request is unspecified or 0. // If a request is specified, then the user expects some kind of resource guarantee. req, hasReq := container.Resources.Requests[resource] @@ -46,9 +47,9 @@ func isResourceBestEffort(container *api.Container, resource api.ResourceName) b // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. 
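Reviewer note: GetPodQOS, which follows, implements the three rules in that comment against resource.Quantity values, aggregating requests and limits across containers. As a simplified standalone illustration, the sketch below applies the same rules literally to plain integer resource maps; container, podQOS, and the integer units are invented for the example, and the quantity aggregation the real code performs is skipped:

```go
package main

import "fmt"

// container is a stand-in for v1.Container with only CPU and memory
// requests and limits, expressed as plain integers.
type container struct {
	requests map[string]int64
	limits   map[string]int64
}

const (
	bestEffort = "BestEffort"
	guaranteed = "Guaranteed"
	burstable  = "Burstable"
)

// podQOS applies the rules from the kubelet comment: BestEffort when no
// container sets any request or limit, Guaranteed when every container sets
// equal requests and limits for every supported resource, Burstable otherwise.
func podQOS(containers []container) string {
	anySet := false
	allGuaranteed := true
	for _, c := range containers {
		if len(c.requests) > 0 || len(c.limits) > 0 {
			anySet = true
		}
		for _, res := range []string{"cpu", "memory"} {
			req, hasReq := c.requests[res]
			lim, hasLim := c.limits[res]
			// Guaranteed requires every container to specify matching
			// requests and limits for every supported resource.
			if !hasReq || !hasLim || req != lim {
				allGuaranteed = false
			}
		}
	}
	switch {
	case !anySet:
		return bestEffort
	case allGuaranteed:
		return guaranteed
	default:
		return burstable
	}
}

func main() {
	fmt.Println(podQOS([]container{{}})) // BestEffort
	fmt.Println(podQOS([]container{{requests: map[string]int64{"cpu": 100}}})) // Burstable
	g := container{
		requests: map[string]int64{"cpu": 100, "memory": 200},
		limits:   map[string]int64{"cpu": 100, "memory": 200},
	}
	fmt.Println(podQOS([]container{g})) // Guaranteed
}
```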
-func GetPodQOS(pod *api.Pod) QOSClass { - requests := api.ResourceList{} - limits := api.ResourceList{} +func GetPodQOS(pod *v1.Pod) QOSClass { + requests := v1.ResourceList{} + limits := v1.ResourceList{} zeroQuantity := resource.MustParse("0") isGuaranteed := true for _, container := range pod.Spec.Containers { @@ -108,11 +109,78 @@ func GetPodQOS(pod *api.Pod) QOSClass { return Burstable } +// InternalGetPodQOS returns the QoS class of a pod. +// A pod is besteffort if none of its containers have specified any requests or limits. +// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. +// A pod is burstable if limits and requests do not match across all containers. +func InternalGetPodQOS(pod *api.Pod) QOSClass { + requests := api.ResourceList{} + limits := api.ResourceList{} + zeroQuantity := resource.MustParse("0") + isGuaranteed := true + var supportedQoSComputeResources = sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory)) + for _, container := range pod.Spec.Containers { + // process requests + for name, quantity := range container.Resources.Requests { + if !supportedQoSComputeResources.Has(string(name)) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.Copy() + if _, exists := requests[name]; !exists { + requests[name] = *delta + } else { + delta.Add(requests[name]) + requests[name] = *delta + } + } + } + // process limits + qosLimitsFound := sets.NewString() + for name, quantity := range container.Resources.Limits { + if !supportedQoSComputeResources.Has(string(name)) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosLimitsFound.Insert(string(name)) + delta := quantity.Copy() + if _, exists := limits[name]; !exists { + limits[name] = *delta + } else { + delta.Add(limits[name]) + limits[name] = *delta + } + } + } + + if len(qosLimitsFound) != len(supportedQoSComputeResources) { + isGuaranteed = false + } + } + if len(requests) == 0 && len(limits) == 0 { + return BestEffort + } + // Check is requests match limits for all resources. + if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) { + return Guaranteed + } + return Burstable +} + // QOSList is a set of (resource name, QoS class) pairs. -type QOSList map[api.ResourceName]QOSClass +type QOSList map[v1.ResourceName]QOSClass // GetQOS returns a mapping of resource name to QoS class of a container -func GetQOS(container *api.Container) QOSList { +func GetQOS(container *v1.Container) QOSList { resourceToQOS := QOSList{} for resource := range allResources(container) { switch { @@ -128,13 +196,13 @@ func GetQOS(container *api.Container) QOSList { } // supportedComputeResources is the list of compute resources for with QoS is supported. 
-var supportedQoSComputeResources = sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory)) +var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory)) // allResources returns a set of all possible resources whose mapped key value is true if present on the container -func allResources(container *api.Container) map[api.ResourceName]bool { - resources := map[api.ResourceName]bool{} +func allResources(container *v1.Container) map[v1.ResourceName]bool { + resources := map[v1.ResourceName]bool{} for _, resource := range supportedQoSComputeResources.List() { - resources[api.ResourceName(resource)] = false + resources[v1.ResourceName(resource)] = false } for resource := range container.Resources.Requests { resources[resource] = true diff --git a/pkg/kubelet/qos/qos_test.go b/pkg/kubelet/qos/qos_test.go index de490d44ef1..9f6ffdc7b60 100644 --- a/pkg/kubelet/qos/qos_test.go +++ b/pkg/kubelet/qos/qos_test.go @@ -19,46 +19,46 @@ package qos import ( "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" ) -func getResourceList(cpu, memory string) api.ResourceList { - res := api.ResourceList{} +func getResourceList(cpu, memory string) v1.ResourceList { + res := v1.ResourceList{} if cpu != "" { - res[api.ResourceCPU] = resource.MustParse(cpu) + res[v1.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { - res[api.ResourceMemory] = resource.MustParse(memory) + res[v1.ResourceMemory] = resource.MustParse(memory) } return res } -func addResource(rName, value string, rl api.ResourceList) api.ResourceList { - rl[api.ResourceName(rName)] = resource.MustParse(value) +func addResource(rName, value string, rl v1.ResourceList) v1.ResourceList { + rl[v1.ResourceName(rName)] = resource.MustParse(value) return rl } -func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { - res := api.ResourceRequirements{} +func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements { + res := v1.ResourceRequirements{} res.Requests = requests res.Limits = limits return res } -func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container { - return api.Container{ +func newContainer(name string, requests v1.ResourceList, limits v1.ResourceList) v1.Container { + return v1.Container{ Name: name, Resources: getResourceRequirements(requests, limits), } } -func newPod(name string, containers []api.Container) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func newPod(name string, containers []v1.Container) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: name, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ Containers: containers, }, } @@ -66,103 +66,103 @@ func newPod(name string, containers []api.Container) *api.Pod { func TestGetPodQOS(t *testing.T) { testCases := []struct { - pod *api.Pod + pod *v1.Pod expected QOSClass }{ { - pod: newPod("guaranteed", []api.Container{ + pod: newPod("guaranteed", []v1.Container{ newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }), expected: Guaranteed, }, { - pod: newPod("guaranteed-with-gpu", []api.Container{ + pod: newPod("guaranteed-with-gpu", []v1.Container{ newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))), }), expected: Guaranteed, }, { - pod: newPod("guaranteed-guaranteed", []api.Container{ + pod: newPod("guaranteed-guaranteed", 
[]v1.Container{ newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }), expected: Guaranteed, }, { - pod: newPod("guaranteed-guaranteed-with-gpu", []api.Container{ + pod: newPod("guaranteed-guaranteed-with-gpu", []v1.Container{ newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))), newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }), expected: Guaranteed, }, { - pod: newPod("best-effort-best-effort", []api.Container{ + pod: newPod("best-effort-best-effort", []v1.Container{ newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), }), expected: BestEffort, }, { - pod: newPod("best-effort-best-effort-with-gpu", []api.Container{ + pod: newPod("best-effort-best-effort-with-gpu", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), }), expected: BestEffort, }, { - pod: newPod("best-effort-with-gpu", []api.Container{ + pod: newPod("best-effort-with-gpu", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), }), expected: BestEffort, }, { - pod: newPod("best-effort-burstable", []api.Container{ + pod: newPod("best-effort-burstable", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("burstable", getResourceList("1", ""), getResourceList("2", "")), }), expected: Burstable, }, { - pod: newPod("best-effort-guaranteed", []api.Container{ + pod: newPod("best-effort-guaranteed", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("guaranteed", getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")), }), expected: Burstable, }, { - pod: newPod("burstable-cpu-guaranteed-memory", []api.Container{ + pod: newPod("burstable-cpu-guaranteed-memory", []v1.Container{ newContainer("burstable", getResourceList("", "100Mi"), getResourceList("", "100Mi")), }), expected: Burstable, }, { - pod: newPod("burstable-no-limits", []api.Container{ + pod: newPod("burstable-no-limits", []v1.Container{ newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("", "")), }), expected: Burstable, }, { - pod: newPod("burstable-guaranteed", []api.Container{ + pod: newPod("burstable-guaranteed", []v1.Container{ newContainer("burstable", getResourceList("1", "100Mi"), getResourceList("2", "100Mi")), newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }), expected: Burstable, }, { - pod: newPod("burstable-unbounded-but-requests-match-limits", []api.Container{ + pod: newPod("burstable-unbounded-but-requests-match-limits", []v1.Container{ newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), newContainer("burstable-unbounded", getResourceList("100m", "100Mi"), getResourceList("", "")), }), expected: Burstable, }, { - pod: newPod("burstable-1", []api.Container{ + pod: newPod("burstable-1", []v1.Container{ newContainer("burstable", getResourceList("10m", "100Mi"), 
getResourceList("100m", "200Mi")), }), expected: Burstable, }, { - pod: newPod("burstable-2", []api.Container{ + pod: newPod("burstable-2", []v1.Container{ newContainer("burstable", getResourceList("0", "0"), addResource("nvidia-gpu", "2", getResourceList("100m", "200Mi"))), }), expected: Burstable, diff --git a/pkg/kubelet/rkt/fake_rkt_interface_test.go b/pkg/kubelet/rkt/fake_rkt_interface_test.go index 6de54564edc..39b24135366 100644 --- a/pkg/kubelet/rkt/fake_rkt_interface_test.go +++ b/pkg/kubelet/rkt/fake_rkt_interface_test.go @@ -26,7 +26,7 @@ import ( rktapi "github.com/coreos/rkt/api/v1alpha" "golang.org/x/net/context" "google.golang.org/grpc" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" ) @@ -159,15 +159,15 @@ type fakeRuntimeHelper struct { err error } -func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { +func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { return nil, fmt.Errorf("Not implemented") } -func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { +func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) { return f.dnsServers, f.dnsSearches, f.err } -func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { +func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { return f.hostName, f.hostDomain, nil } @@ -175,7 +175,7 @@ func (f *fakeRuntimeHelper) GetPodDir(podUID types.UID) string { return "/poddir/" + string(podUID) } -func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 { +func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { return nil } @@ -208,14 +208,14 @@ func (f *fakeRktCli) Reset() { } type fakePodGetter struct { - pods map[types.UID]*api.Pod + pods map[types.UID]*v1.Pod } func newFakePodGetter() *fakePodGetter { - return &fakePodGetter{pods: make(map[types.UID]*api.Pod)} + return &fakePodGetter{pods: make(map[types.UID]*v1.Pod)} } -func (f fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) { +func (f fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) { p, found := f.pods[uid] return p, found } diff --git a/pkg/kubelet/rkt/image.go b/pkg/kubelet/rkt/image.go index 8fefad93098..b16f53bcc05 100644 --- a/pkg/kubelet/rkt/image.go +++ b/pkg/kubelet/rkt/image.go @@ -33,7 +33,7 @@ import ( dockertypes "github.com/docker/engine-api/types" "github.com/golang/glog" "golang.org/x/net/context" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/credentialprovider" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/parsers" @@ -45,7 +45,7 @@ import ( // // http://issue.k8s.io/7203 // -func (r *Runtime) PullImage(image kubecontainer.ImageSpec, pullSecrets []api.Secret) error { +func (r *Runtime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret) error { img := image.Image // TODO(yifan): The credential operation is a copy from dockertools package, // Need to resolve the code duplication. 
diff --git a/pkg/kubelet/rkt/log.go b/pkg/kubelet/rkt/log.go index 46543820b68..f6f1c059522 100644 --- a/pkg/kubelet/rkt/log.go +++ b/pkg/kubelet/rkt/log.go @@ -26,8 +26,8 @@ import ( "golang.org/x/net/context" rktapi "github.com/coreos/rkt/api/v1alpha" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" ) @@ -37,7 +37,7 @@ const ( ) // processLines write the lines into stdout in the required format. -func processLines(lines []string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) { +func processLines(lines []string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) { msgKey := "MESSAGE=" for _, line := range lines { @@ -75,7 +75,7 @@ func processLines(lines []string, logOptions *api.PodLogOptions, stdout, stderr // "100" or "all") to tail the log. // // TODO(yifan): This doesn't work with lkvm stage1 yet. -func (r *Runtime) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { +func (r *Runtime) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { id, err := parseContainerID(containerID) if err != nil { return err diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 5be3f8b9db0..b3c9520a146 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -41,7 +41,7 @@ import ( "github.com/golang/glog" "golang.org/x/net/context" "google.golang.org/grpc" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/credentialprovider" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -185,7 +185,7 @@ var _ kubecontainer.DirectStreamingRuntime = &Runtime{} // TODO(yifan): This duplicates the podGetter in dockertools. type podGetter interface { - GetPodByUID(kubetypes.UID) (*api.Pod, bool) + GetPodByUID(kubetypes.UID) (*v1.Pod, bool) } // cliInterface wrapps the command line calls for testing purpose. @@ -338,7 +338,7 @@ func getRktUUIDFromServiceFileName(filename string) string { } // setIsolators sets the apps' isolators according to the security context and resource spec. -func setIsolators(app *appctypes.App, c *api.Container, ctx *api.SecurityContext) error { +func setIsolators(app *appctypes.App, c *v1.Container, ctx *v1.SecurityContext) error { var isolators []appctypes.Isolator // Capabilities isolators. @@ -374,7 +374,7 @@ func setIsolators(app *appctypes.App, c *api.Container, ctx *api.SecurityContext } // If limit is empty, populate it with request and vice versa. 
- resources := make(map[api.ResourceName]*resource) + resources := make(map[v1.ResourceName]*resource) for name, quantity := range c.Resources.Limits { resources[name] = &resource{limit: quantity.String(), request: quantity.String()} } @@ -389,13 +389,13 @@ func setIsolators(app *appctypes.App, c *api.Container, ctx *api.SecurityContext for name, res := range resources { switch name { - case api.ResourceCPU: + case v1.ResourceCPU: cpu, err := appctypes.NewResourceCPUIsolator(res.request, res.limit) if err != nil { return err } isolators = append(isolators, cpu.AsIsolator()) - case api.ResourceMemory: + case v1.ResourceMemory: memory, err := appctypes.NewResourceMemoryIsolator(res.request, res.limit) if err != nil { return err @@ -493,7 +493,7 @@ func mergePortMappings(app *appctypes.App, containerPorts []appctypes.Port) { } } -func verifyNonRoot(app *appctypes.App, ctx *api.SecurityContext) error { +func verifyNonRoot(app *appctypes.App, ctx *v1.SecurityContext) error { if ctx != nil && ctx.RunAsNonRoot != nil && *ctx.RunAsNonRoot { if ctx.RunAsUser != nil && *ctx.RunAsUser == 0 { return fmt.Errorf("container's runAsUser breaks non-root policy") @@ -505,7 +505,7 @@ func verifyNonRoot(app *appctypes.App, ctx *api.SecurityContext) error { return nil } -func setSupplementalGIDs(app *appctypes.App, podCtx *api.PodSecurityContext, supplementalGids []int64) { +func setSupplementalGIDs(app *appctypes.App, podCtx *v1.PodSecurityContext, supplementalGids []int64) { if podCtx != nil || len(supplementalGids) != 0 { app.SupplementaryGIDs = app.SupplementaryGIDs[:0] } @@ -523,9 +523,9 @@ func setSupplementalGIDs(app *appctypes.App, podCtx *api.PodSecurityContext, sup } // setApp merges the container spec with the image's manifest. -func setApp(imgManifest *appcschema.ImageManifest, c *api.Container, +func setApp(imgManifest *appcschema.ImageManifest, c *v1.Container, mountPoints []appctypes.MountPoint, containerPorts []appctypes.Port, envs []kubecontainer.EnvVar, - ctx *api.SecurityContext, podCtx *api.PodSecurityContext, supplementalGids []int64) error { + ctx *v1.SecurityContext, podCtx *v1.PodSecurityContext, supplementalGids []int64) error { app := imgManifest.App @@ -598,7 +598,7 @@ func setApp(imgManifest *appcschema.ImageManifest, c *api.Container, } // makePodManifest transforms a kubelet pod spec to the rkt pod manifest. 
-func (r *Runtime) makePodManifest(pod *api.Pod, podIP string, pullSecrets []api.Secret) (*appcschema.PodManifest, error) { +func (r *Runtime) makePodManifest(pod *v1.Pod, podIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) { manifest := appcschema.BlankPodManifest() ctx, cancel := context.WithTimeout(context.Background(), r.requestTimeout) @@ -732,7 +732,7 @@ func (r *Runtime) podFinishedAt(podUID kubetypes.UID, rktUID string) time.Time { return stat.ModTime() } -func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions, container *api.Container) (*kubecontainer.Mount, error) { +func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions, container *v1.Container) (*kubecontainer.Mount, error) { if opts.PodContainerDir == "" || container.TerminationMessagePath == "" { return nil, nil } @@ -767,7 +767,7 @@ func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions, return &mnt, nil } -func (r *Runtime) newAppcRuntimeApp(pod *api.Pod, podIP string, c api.Container, requiresPrivileged bool, pullSecrets []api.Secret, manifest *appcschema.PodManifest) error { +func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error { var annotations appctypes.Annotations = []appctypes.Annotation{ { Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), @@ -912,8 +912,8 @@ func newUnitOption(section, name, value string) *unit.UnitOption { return &unit.UnitOption{Section: section, Name: name, Value: value} } -// apiPodToruntimePod converts an api.Pod to kubelet/container.Pod. -func apiPodToruntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod { +// apiPodToruntimePod converts a v1.Pod to kubelet/container.Pod. +func apiPodToruntimePod(uuid string, pod *v1.Pod) *kubecontainer.Pod { p := &kubecontainer.Pod{ ID: pod.UID, Name: pod.Name, @@ -939,19 +939,19 @@ func serviceFilePath(serviceName string) string { // shouldCreateNetns returns true if: // The pod does not run in host network. And // The pod runs inside a netns created outside of rkt. -func (r *Runtime) shouldCreateNetns(pod *api.Pod) bool { +func (r *Runtime) shouldCreateNetns(pod *v1.Pod) bool { return !kubecontainer.IsHostNetworkPod(pod) && r.networkPlugin.Name() != network.DefaultPluginName } // usesRktHostNetwork returns true if: // The pod runs in the host network. Or // The pod runs inside a netns created outside of rkt. -func (r *Runtime) usesRktHostNetwork(pod *api.Pod) bool { +func (r *Runtime) usesRktHostNetwork(pod *v1.Pod) bool { return kubecontainer.IsHostNetworkPod(pod) || r.shouldCreateNetns(pod) } // generateRunCommand crafts a 'rkt run-prepared' command with necessary parameters. -func (r *Runtime) generateRunCommand(pod *api.Pod, uuid, netnsName string) (string, error) { +func (r *Runtime) generateRunCommand(pod *v1.Pod, uuid, netnsName string) (string, error) { config := *r.config privileged := true @@ -1040,7 +1040,7 @@ func (r *Runtime) generateRunCommand(pod *api.Pod, uuid, netnsName string) (stri return strings.Join(runPrepared, " "), nil } -func (r *Runtime) cleanupPodNetwork(pod *api.Pod) error { +func (r *Runtime) cleanupPodNetwork(pod *v1.Pod) error { glog.V(3).Infof("Calling network plugin %s to tear down pod for %s", r.networkPlugin.Name(), format.Pod(pod)) // No-op if the pod is not running in a created netns. 
@@ -1083,7 +1083,7 @@ func (r *Runtime) preparePodArgs(manifest *appcschema.PodManifest, manifestFileN return cmds } -func (r *Runtime) getSelinuxContext(opt *api.SELinuxOptions) (string, error) { +func (r *Runtime) getSelinuxContext(opt *v1.SELinuxOptions) (string, error) { selinuxRunner := selinux.NewSELinuxRunner() str, err := selinuxRunner.Getfilecon(r.config.Dir) if err != nil { @@ -1118,7 +1118,7 @@ func (r *Runtime) getSelinuxContext(opt *api.SELinuxOptions) (string, error) { // // On success, it will return a string that represents name of the unit file // and the runtime pod. -func (r *Runtime) preparePod(pod *api.Pod, podIP string, pullSecrets []api.Secret, netnsName string) (string, *kubecontainer.Pod, error) { +func (r *Runtime) preparePod(pod *v1.Pod, podIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) { // Generate the appc pod manifest from the k8s pod spec. manifest, err := r.makePodManifest(pod, podIP, pullSecrets) if err != nil { @@ -1230,13 +1230,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f uuid := utilstrings.ShortenString(id.uuid, 8) switch reason { case "Created": - r.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, "Created with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, "Created with rkt id %v", uuid) case "Started": - r.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started with rkt id %v", uuid) case "Failed": - r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) + r.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) case "Killing": - r.recorder.Eventf(ref, api.EventTypeNormal, events.KillingContainer, "Killing with rkt id %v", uuid) + r.recorder.Eventf(ref, v1.EventTypeNormal, events.KillingContainer, "Killing with rkt id %v", uuid) default: glog.Errorf("rkt: Unexpected event %q", reason) } @@ -1258,7 +1258,7 @@ func netnsPathFromName(netnsName string) string { // one occurred. // // If the pod is running in host network or is running using the no-op plugin, then nothing will be done. -func (r *Runtime) setupPodNetwork(pod *api.Pod) (string, string, error) { +func (r *Runtime) setupPodNetwork(pod *v1.Pod) (string, string, error) { glog.V(3).Infof("Calling network plugin %s to set up pod for %s", r.networkPlugin.Name(), format.Pod(pod)) // No-op if the pod is not running in a created netns. @@ -1298,7 +1298,7 @@ func (r *Runtime) setupPodNetwork(pod *api.Pod) (string, string, error) { // RunPod first creates the unit file for a pod, and then // starts the unit over d-bus. 
-func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error { +func (r *Runtime) RunPod(pod *v1.Pod, pullSecrets []v1.Secret) error { glog.V(4).Infof("Rkt starts to run pod: name %q.", format.Pod(pod)) var err error @@ -1322,7 +1322,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error { continue } if prepareErr != nil { - r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr) + r.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr) continue } containerID := runtimePod.Containers[i].ID @@ -1369,7 +1369,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error { return nil } -func (r *Runtime) runPreStopHook(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container) error { +func (r *Runtime) runPreStopHook(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container) error { glog.V(4).Infof("rkt: Running pre-stop hook for container %q of pod %q", container.Name, format.Pod(pod)) msg, err := r.runner.Run(containerID, pod, container, container.Lifecycle.PreStop) if err != nil { @@ -1377,13 +1377,13 @@ func (r *Runtime) runPreStopHook(containerID kubecontainer.ContainerID, pod *api if !ok { glog.Warningf("No ref for container %q", containerID) } else { - r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedPreStopHook, msg) + r.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedPreStopHook, msg) } } return err } -func (r *Runtime) runPostStartHook(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container) error { +func (r *Runtime) runPostStartHook(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container) error { glog.V(4).Infof("rkt: Running post-start hook for container %q of pod %q", container.Name, format.Pod(pod)) cid, err := parseContainerID(containerID) if err != nil { @@ -1419,7 +1419,7 @@ func (r *Runtime) runPostStartHook(containerID kubecontainer.ContainerID, pod *a if !ok { glog.Warningf("No ref for container %q", containerID) } else { - r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedPostStartHook, msg) + r.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedPostStartHook, msg) } } return err @@ -1432,7 +1432,7 @@ const ( lifecyclePreStopHook lifecycleHookType = "pre-stop" ) -func (r *Runtime) runLifecycleHooks(pod *api.Pod, runtimePod *kubecontainer.Pod, typ lifecycleHookType) error { +func (r *Runtime) runLifecycleHooks(pod *v1.Pod, runtimePod *kubecontainer.Pod, typ lifecycleHookType) error { var wg sync.WaitGroup var errlist []error errCh := make(chan error, len(pod.Spec.Containers)) @@ -1440,7 +1440,7 @@ func (r *Runtime) runLifecycleHooks(pod *api.Pod, runtimePod *kubecontainer.Pod, wg.Add(len(pod.Spec.Containers)) for i, c := range pod.Spec.Containers { - var hookFunc func(kubecontainer.ContainerID, *api.Pod, *api.Container) error + var hookFunc func(kubecontainer.ContainerID, *v1.Pod, *v1.Container) error switch typ { case lifecyclePostStartHook: @@ -1601,7 +1601,7 @@ func (r *Runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) { return result, nil } -func getPodTerminationGracePeriodInSecond(pod *api.Pod) int64 { +func getPodTerminationGracePeriodInSecond(pod *v1.Pod) int64 { var gracePeriod int64 switch { case pod.DeletionGracePeriodSeconds != nil: @@ -1615,7 +1615,7 @@ func getPodTerminationGracePeriodInSecond(pod *api.Pod) int64 { return gracePeriod } 
-func (r *Runtime) waitPreStopHooks(pod *api.Pod, runningPod *kubecontainer.Pod) { +func (r *Runtime) waitPreStopHooks(pod *v1.Pod, runningPod *kubecontainer.Pod) { gracePeriod := getPodTerminationGracePeriodInSecond(pod) done := make(chan struct{}) @@ -1635,7 +1635,7 @@ func (r *Runtime) waitPreStopHooks(pod *api.Pod, runningPod *kubecontainer.Pod) // KillPod invokes 'systemctl kill' to kill the unit that runs the pod. // TODO: add support for gracePeriodOverride which is used in eviction scenarios -func (r *Runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { +func (r *Runtime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { glog.V(4).Infof("Rkt is killing pod: name %q.", runningPod.Name) if len(runningPod.Containers) == 0 { @@ -1706,7 +1706,7 @@ func (r *Runtime) Status() (*kubecontainer.RuntimeStatus, error) { } // SyncPod syncs the running pod to match the specified desired pod. -func (r *Runtime) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { +func (r *Runtime) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { var err error defer func() { if err != nil { @@ -1748,7 +1748,7 @@ func (r *Runtime) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontaine } liveness, found := r.livenessManager.Get(c.ID) - if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever { + if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != v1.RestartPolicyNever { glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name) restartPod = true break @@ -1969,16 +1969,15 @@ func (r *Runtime) cleanupPodNetworkFromServiceFile(serviceFilePath string) error if err != nil { return err } - return r.cleanupPodNetwork(&api.Pod{ - ObjectMeta: api.ObjectMeta{ + return r.cleanupPodNetwork(&v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: kubetypes.UID(id), Name: name, Namespace: namespace, }, - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + Spec: v1.PodSpec{ HostNetwork: hostnetwork, - }, + }, }) } @@ -2260,7 +2259,7 @@ func populateContainerStatus(pod rktapi.Pod, app rktapi.App, runtimeApp appcsche ExitCode: int(app.ExitCode), // By default, the version returned by rkt API service will be "latest" if not specified. Image: fmt.Sprintf("%s:%s", app.Image.Name, app.Image.Version), - ImageID: "rkt://" + app.Image.Id, // TODO(yifan): Add the prefix only in api.PodStatus. + ImageID: "rkt://" + app.Image.Id, // TODO(yifan): Add the prefix only in v1.PodStatus. Hash: hashNum, // TODO(yifan): Note that now all apps share the same restart count, this might // change once apps don't share the same lifecycle. 
diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go index 472ccc697f2..d73a46a68cb 100644 --- a/pkg/kubelet/rkt/rkt_test.go +++ b/pkg/kubelet/rkt/rkt_test.go @@ -31,8 +31,8 @@ import ( rktapi "github.com/coreos/rkt/api/v1alpha" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertesting "k8s.io/kubernetes/pkg/kubelet/container/testing" kubetesting "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -967,19 +967,19 @@ func TestSetApp(t *testing.T) { fsgid := int64(3) tests := []struct { - container *api.Container + container *v1.Container mountPoints []appctypes.MountPoint containerPorts []appctypes.Port envs []kubecontainer.EnvVar - ctx *api.SecurityContext - podCtx *api.PodSecurityContext + ctx *v1.SecurityContext + podCtx *v1.PodSecurityContext supplementalGids []int64 expect *appctypes.App err error }{ // Nothing should change, but the "User" and "Group" should be filled. { - container: &api.Container{}, + container: &v1.Container{}, mountPoints: []appctypes.MountPoint{}, containerPorts: []appctypes.Port{}, envs: []kubecontainer.EnvVar{}, @@ -992,11 +992,11 @@ func TestSetApp(t *testing.T) { // error verifying non-root. { - container: &api.Container{}, + container: &v1.Container{}, mountPoints: []appctypes.MountPoint{}, containerPorts: []appctypes.Port{}, envs: []kubecontainer.EnvVar{}, - ctx: &api.SecurityContext{ + ctx: &v1.SecurityContext{ RunAsNonRoot: &runAsNonRootTrue, RunAsUser: &rootUser, }, @@ -1008,7 +1008,7 @@ func TestSetApp(t *testing.T) { // app's args should be changed. { - container: &api.Container{ + container: &v1.Container{ Args: []string{"foo"}, }, mountPoints: []appctypes.MountPoint{}, @@ -1044,12 +1044,12 @@ func TestSetApp(t *testing.T) { // app should be changed. { - container: &api.Container{ + container: &v1.Container{ Command: []string{"/bin/bar", "$(env-bar)"}, WorkingDir: tmpDir, - Resources: api.ResourceRequirements{ - Limits: api.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")}, - Requests: api.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")}, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")}, }, }, mountPoints: []appctypes.MountPoint{ @@ -1061,15 +1061,15 @@ func TestSetApp(t *testing.T) { envs: []kubecontainer.EnvVar{ {Name: "env-bar", Value: "foo"}, }, - ctx: &api.SecurityContext{ - Capabilities: &api.Capabilities{ - Add: []api.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"}, - Drop: []api.Capability{"CAP_SETUID", "CAP_SETGID"}, + ctx: &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ + Add: []v1.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"}, + Drop: []v1.Capability{"CAP_SETUID", "CAP_SETGID"}, }, RunAsUser: &nonRootUser, RunAsNonRoot: &runAsNonRootTrue, }, - podCtx: &api.PodSecurityContext{ + podCtx: &v1.PodSecurityContext{ SupplementalGroups: []int64{1, 2}, FSGroup: &fsgid, }, @@ -1103,14 +1103,14 @@ func TestSetApp(t *testing.T) { // app should be changed. (env, mounts, ports, are overrided). 
{ - container: &api.Container{ + container: &v1.Container{ Name: "hello-world", Command: []string{"/bin/hello", "$(env-foo)"}, Args: []string{"hello", "world", "$(env-bar)"}, WorkingDir: tmpDir, - Resources: api.ResourceRequirements{ - Limits: api.ResourceList{"cpu": resource.MustParse("50m")}, - Requests: api.ResourceList{"memory": resource.MustParse("5M")}, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("50m")}, + Requests: v1.ResourceList{"memory": resource.MustParse("5M")}, }, }, mountPoints: []appctypes.MountPoint{ @@ -1123,15 +1123,15 @@ func TestSetApp(t *testing.T) { {Name: "env-foo", Value: "foo"}, {Name: "env-bar", Value: "bar"}, }, - ctx: &api.SecurityContext{ - Capabilities: &api.Capabilities{ - Add: []api.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"}, - Drop: []api.Capability{"CAP_SETUID", "CAP_SETGID"}, + ctx: &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ + Add: []v1.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"}, + Drop: []v1.Capability{"CAP_SETUID", "CAP_SETGID"}, }, RunAsUser: &nonRootUser, RunAsNonRoot: &runAsNonRootTrue, }, - podCtx: &api.PodSecurityContext{ + podCtx: &v1.PodSecurityContext{ SupplementalGroups: []int64{1, 2}, FSGroup: &fsgid, }, @@ -1188,7 +1188,7 @@ func TestGenerateRunCommand(t *testing.T) { tests := []struct { networkPlugin network.NetworkPlugin - pod *api.Pod + pod *v1.Pod uuid string netnsName string @@ -1202,12 +1202,12 @@ func TestGenerateRunCommand(t *testing.T) { // Case #0, returns error. { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container-foo"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1221,12 +1221,12 @@ func TestGenerateRunCommand(t *testing.T) { // Case #1, returns no dns, with private-net. { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container-foo"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1240,15 +1240,14 @@ func TestGenerateRunCommand(t *testing.T) { // Case #2, returns no dns, with host-net. { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + Spec: v1.PodSpec{ HostNetwork: true, - }, - Containers: []api.Container{{Name: "container-foo"}}, + + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1262,15 +1261,14 @@ func TestGenerateRunCommand(t *testing.T) { // Case #3, returns dns, dns searches, with private-net. { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + Spec: v1.PodSpec{ HostNetwork: false, - }, - Containers: []api.Container{{Name: "container-foo"}}, + + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1284,15 +1282,14 @@ func TestGenerateRunCommand(t *testing.T) { // Case #4, returns no dns, dns searches, with host-network. 
{ kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ + Spec: v1.PodSpec{ HostNetwork: true, - }, - Containers: []api.Container{{Name: "container-foo"}}, + + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1306,12 +1303,12 @@ func TestGenerateRunCommand(t *testing.T) { // Case #5, with no-op plugin, returns --net=rkt.kubernetes.io, with dns and dns search. { &network.NoopNetworkPlugin{}, - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container-foo"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "container-foo"}}, }, }, "rkt-uuid-foo", @@ -1325,14 +1322,14 @@ func TestGenerateRunCommand(t *testing.T) { // Case #6, if all containers are privileged, the result should have 'insecure-options=all-run' { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{ - {Name: "container-foo", SecurityContext: &api.SecurityContext{Privileged: &boolTrue}}, - {Name: "container-bar", SecurityContext: &api.SecurityContext{Privileged: &boolTrue}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container-foo", SecurityContext: &v1.SecurityContext{Privileged: &boolTrue}}, + {Name: "container-bar", SecurityContext: &v1.SecurityContext{Privileged: &boolTrue}}, }, }, }, @@ -1347,14 +1344,14 @@ func TestGenerateRunCommand(t *testing.T) { // Case #7, if not all containers are privileged, the result should not have 'insecure-options=all-run' { kubenet.NewPlugin("/tmp"), - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-name-foo", }, - Spec: api.PodSpec{ - Containers: []api.Container{ - {Name: "container-foo", SecurityContext: &api.SecurityContext{Privileged: &boolTrue}}, - {Name: "container-bar", SecurityContext: &api.SecurityContext{Privileged: &boolFalse}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container-foo", SecurityContext: &v1.SecurityContext{Privileged: &boolTrue}}, + {Name: "container-bar", SecurityContext: &v1.SecurityContext{Privileged: &boolFalse}}, }, }, }, @@ -1409,7 +1406,7 @@ func TestLifeCycleHooks(t *testing.T) { } tests := []struct { - pod *api.Pod + pod *v1.Pod runtimePod *kubecontainer.Pod postStartRuns []string preStopRuns []string @@ -1417,14 +1414,14 @@ func TestLifeCycleHooks(t *testing.T) { }{ { // Case 0, container without any hooks. - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-1", Namespace: "ns-1", UID: "uid-1", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "container-name-1"}, }, }, @@ -1440,43 +1437,43 @@ func TestLifeCycleHooks(t *testing.T) { }, { // Case 1, containers with post-start and pre-stop hooks. 
- &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-1", Namespace: "ns-1", UID: "uid-1", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "container-name-1", - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - Exec: &api.ExecAction{}, + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + Exec: &v1.ExecAction{}, }, }, }, { Name: "container-name-2", - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{ - HTTPGet: &api.HTTPGetAction{}, + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{}, }, }, }, { Name: "container-name-3", - Lifecycle: &api.Lifecycle{ - PreStop: &api.Handler{ - Exec: &api.ExecAction{}, + Lifecycle: &v1.Lifecycle{ + PreStop: &v1.Handler{ + Exec: &v1.ExecAction{}, }, }, }, { Name: "container-name-4", - Lifecycle: &api.Lifecycle{ - PreStop: &api.Handler{ - HTTPGet: &api.HTTPGetAction{}, + Lifecycle: &v1.Lifecycle{ + PreStop: &v1.Handler{ + HTTPGet: &v1.HTTPGetAction{}, }, }, }, @@ -1515,19 +1512,19 @@ func TestLifeCycleHooks(t *testing.T) { }, { // Case 2, one container with invalid hooks. - &api.Pod{ - ObjectMeta: api.ObjectMeta{ + &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod-1", Namespace: "ns-1", UID: "uid-1", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "container-name-1", - Lifecycle: &api.Lifecycle{ - PostStart: &api.Handler{}, - PreStop: &api.Handler{}, + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{}, + PreStop: &v1.Handler{}, }, }, }, @@ -1543,7 +1540,7 @@ func TestLifeCycleHooks(t *testing.T) { }, []string{}, []string{}, - errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}), + errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &v1.Handler{})}), }, } @@ -1618,7 +1615,7 @@ func TestGarbageCollect(t *testing.T) { tests := []struct { gcPolicy kubecontainer.ContainerGCPolicy - apiPods []*api.Pod + apiPods []*v1.Pod pods []*rktapi.Pod serviceFilesOnDisk []string expectedCommands []string @@ -1634,11 +1631,11 @@ func TestGarbageCollect(t *testing.T) { MinAge: 0, MaxContainers: 0, }, - []*api.Pod{ - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}}, - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}}, - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-3"}}, - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-4"}}, + []*v1.Pod{ + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-1"}}, + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-2"}}, + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-3"}}, + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-4"}}, }, []*rktapi.Pod{ { @@ -1718,10 +1715,10 @@ func TestGarbageCollect(t *testing.T) { MinAge: 0, MaxContainers: 1, }, - []*api.Pod{ - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-0"}}, - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}}, - {ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}}, + []*v1.Pod{ + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-0"}}, + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-1"}}, + {ObjectMeta: v1.ObjectMeta{UID: "pod-uid-2"}}, }, []*rktapi.Pod{ { @@ -1817,7 +1814,7 @@ func TestGarbageCollect(t *testing.T) { ctrl.Finish() fakeOS.Removes = []string{} fs.resetFailedUnits = []string{} - getter.pods = make(map[kubetypes.UID]*api.Pod) + getter.pods = make(map[kubetypes.UID]*v1.Pod) } } @@ -1836,13 +1833,13 @@ func TestMakePodManifestAnnotations(t *testing.T) { r := &Runtime{apisvc: fr, systemd: fs} testCases := []struct { - in *api.Pod + in *v1.Pod out *appcschema.PodManifest outerr error }{ { - in: &api.Pod{ - 
ObjectMeta: api.ObjectMeta{ + in: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "uid-1", Name: "name-1", Namespace: "namespace-1", @@ -1889,7 +1886,7 @@ func TestMakePodManifestAnnotations(t *testing.T) { for i, testCase := range testCases { hint := fmt.Sprintf("case #%d", i) - result, err := r.makePodManifest(testCase.in, "", []api.Secret{}) + result, err := r.makePodManifest(testCase.in, "", []v1.Secret{}) assert.Equal(t, testCase.outerr, err, hint) if err == nil { sort.Sort(annotationsByName(result.Annotations)) diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go index 9ad3c476af3..3b73a9fe7c0 100644 --- a/pkg/kubelet/runonce.go +++ b/pkg/kubelet/runonce.go @@ -22,7 +22,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -36,7 +36,7 @@ const ( ) type RunPodResult struct { - Pod *api.Pod + Pod *v1.Pod Err error } @@ -66,9 +66,9 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, } // runOnce runs a given set of pods and returns their status. -func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { +func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { ch := make(chan RunPodResult) - admitted := []*api.Pod{} + admitted := []*v1.Pod{} for _, pod := range pods { // Check if we can admit the pod. if ok, reason, message := kl.canAdmitPod(admitted, pod); !ok { @@ -78,7 +78,7 @@ func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results [ } admitted = append(admitted, pod) - go func(pod *api.Pod) { + go func(pod *v1.Pod) { err := kl.runPod(pod, retryDelay) ch <- RunPodResult{pod, err} }(pod) @@ -105,7 +105,7 @@ func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results [ } // runPod runs a single pod and wait until all containers are running. -func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error { +func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { delay := retryDelay retry := 0 for { @@ -145,7 +145,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error { } // isPodRunning returns true if all containers of a manifest are running. 
-func (kl *Kubelet) isPodRunning(pod *api.Pod, status *kubecontainer.PodStatus) bool { +func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bool { for _, c := range pod.Spec.Containers { cs := status.FindContainerStatusByName(c.Name) if cs == nil || cs.State != kubecontainer.ContainerStateRunning { diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 4e7de76def7..f212089fda2 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -23,9 +23,9 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -109,13 +109,13 @@ func TestRunOnce(t *testing.T) { // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency volumeStatsAggPeriod := time.Second * 10 kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime) - nodeRef := &api.ObjectReference{ + nodeRef := &v1.ObjectReference{ Kind: "Node", Name: string(kb.nodeName), UID: types.UID(kb.nodeName), Namespace: "", } - fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error { + fakeKillPodFunc := func(pod *v1.Pod, podStatus v1.PodStatus, gracePeriodOverride *int64) error { return nil } evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, nil, kb.recorder, nodeRef, kb.clock) @@ -128,15 +128,15 @@ func TestRunOnce(t *testing.T) { t.Errorf("Failed to init data dirs: %v", err) } - pods := []*api.Pod{ + pods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ {Name: "bar"}, }, }, diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index ededae92e3c..a070393500a 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -39,7 +39,7 @@ import ( apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/v1/validation" "k8s.io/kubernetes/pkg/auth/authenticator" "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/healthz" @@ -162,19 +162,19 @@ type HostInterface interface { GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) - GetPods() []*api.Pod - GetRunningPods() ([]*api.Pod, error) - GetPodByName(namespace, name string) (*api.Pod, bool) + GetPods() []*v1.Pod + GetRunningPods() ([]*v1.Pod, error) + GetPodByName(namespace, name string) (*v1.Pod, bool) RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, 
resize <-chan term.Size, timeout time.Duration) error AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error - GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error + GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error ServeLogs(w http.ResponseWriter, req *http.Request) PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error StreamingConnectionIdleTimeout() time.Duration ResyncInterval() time.Duration GetHostname() string - GetNode() (*api.Node, error) + GetNode() (*v1.Node, error) GetNodeConfig() cm.NodeConfig LatestLoopEntryTime() time.Time ImagesFsInfo() (cadvisorapiv2.FsInfo, error) @@ -456,7 +456,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re } } // container logs on the kubelet are locked to the v1 API version of PodLogOptions - logOptions := &api.PodLogOptions{} + logOptions := &v1.PodLogOptions{} if err := api.ParameterCodec.DecodeParameters(query, v1.SchemeGroupVersion, logOptions); err != nil { response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Unable to decode query."}`)) return @@ -511,17 +511,17 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re } } -// encodePods creates an api.PodList object from pods and returns the encoded +// encodePods creates a v1.PodList object from pods and returns the encoded // PodList. -func encodePods(pods []*api.Pod) (data []byte, err error) { - podList := new(api.PodList) +func encodePods(pods []*v1.Pod) (data []byte, err error) { + podList := new(v1.PodList) for _, pod := range pods { podList.Items = append(podList.Items, *pod) } // TODO: this needs to be parameterized to the kubelet, not hardcoded. Depends on Kubelet // as API server refactor. 
// TODO: Locked to v1, needs to be made generic - codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) + codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"}) return runtime.Encode(codec, podList) } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 648eaa30d18..99bf3e335d4 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -40,6 +40,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/kubernetes/pkg/api" apierrs "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/auth/user" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -62,18 +63,18 @@ const ( ) type fakeKubelet struct { - podByNameFunc func(namespace, name string) (*api.Pod, bool) + podByNameFunc func(namespace, name string) (*v1.Pod, bool) containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) machineInfoFunc func() (*cadvisorapi.MachineInfo, error) - podsFunc func() []*api.Pod - runningPodsFunc func() ([]*api.Pod, error) + podsFunc func() []*v1.Pod + runningPodsFunc func() ([]*v1.Pod, error) logFunc func(w http.ResponseWriter, req *http.Request) runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error attachFunc func(pod string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error - containerLogsFunc func(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error + containerLogsFunc func(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error streamingConnectionIdleTimeoutFunc func() time.Duration hostnameFunc func() string resyncInterval time.Duration @@ -90,7 +91,7 @@ func (fk *fakeKubelet) LatestLoopEntryTime() time.Time { return fk.loopEntryTime } -func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.Pod, bool) { +func (fk *fakeKubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) { return fk.podByNameFunc(namespace, name) } @@ -106,11 +107,11 @@ func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) return fk.machineInfoFunc() } -func (fk *fakeKubelet) GetPods() []*api.Pod { +func (fk *fakeKubelet) GetPods() []*v1.Pod { return fk.podsFunc() } -func (fk *fakeKubelet) GetRunningPods() ([]*api.Pod, error) { +func (fk *fakeKubelet) GetRunningPods() ([]*v1.Pod, error) { return fk.runningPodsFunc() } @@ -118,7 +119,7 @@ func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { fk.logFunc(w, req) } -func (fk *fakeKubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { +func (fk *fakeKubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { return fk.containerLogsFunc(podFullName, containerName, logOptions, stdout, stderr) } @@ -173,7 +174,7 @@ func (_ *fakeKubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) { return 
cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupport Operation RootFsInfo") } -func (_ *fakeKubelet) GetNode() (*api.Node, error) { return nil, nil } +func (_ *fakeKubelet) GetNode() (*v1.Node, error) { return nil, nil } func (_ *fakeKubelet) GetNodeConfig() cm.NodeConfig { return cm.NodeConfig{} } func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) { @@ -210,9 +211,9 @@ func newServerTest() *serverTestFramework { hostnameFunc: func() string { return "127.0.0.1" }, - podByNameFunc: func(namespace, name string) (*api.Pod, bool) { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ + podByNameFunc: func(namespace, name string) (*v1.Pod, bool) { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: namespace, Name: name, UID: testUID, @@ -902,14 +903,14 @@ func assertHealthIsOk(t *testing.T, httpURL string) { } func setPodByNameFunc(fw *serverTestFramework, namespace, pod, container string) { - fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*api.Pod, bool) { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ + fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*v1.Pod, bool) { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: namespace, Name: pod, }, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: container, }, @@ -919,8 +920,8 @@ func setPodByNameFunc(fw *serverTestFramework, namespace, pod, container string) } } -func setGetContainerLogsFunc(fw *serverTestFramework, t *testing.T, expectedPodName, expectedContainerName string, expectedLogOptions *api.PodLogOptions, output string) { - fw.fakeKubelet.containerLogsFunc = func(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { +func setGetContainerLogsFunc(fw *serverTestFramework, t *testing.T, expectedPodName, expectedContainerName string, expectedLogOptions *v1.PodLogOptions, output string) { + fw.fakeKubelet.containerLogsFunc = func(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { if podFullName != expectedPodName { t.Errorf("expected %s, got %s", expectedPodName, podFullName) } @@ -946,7 +947,7 @@ func TestContainerLogs(t *testing.T) { expectedPodName := getPodName(podName, podNamespace) expectedContainerName := "baz" setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName) if err != nil { t.Errorf("Got error GETing: %v", err) @@ -973,7 +974,7 @@ func TestContainerLogsWithLimitBytes(t *testing.T) { expectedContainerName := "baz" bytes := int64(3) setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{LimitBytes: &bytes}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{LimitBytes: &bytes}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?limitBytes=3") if err != nil { t.Errorf("Got error GETing: %v", err) @@ -1000,7 +1001,7 @@ func TestContainerLogsWithTail(t *testing.T) { expectedContainerName := "baz" expectedTail := int64(5) setPodByNameFunc(fw, 
podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{TailLines: &expectedTail}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{TailLines: &expectedTail}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tailLines=5") if err != nil { t.Errorf("Got error GETing: %v", err) @@ -1027,7 +1028,7 @@ func TestContainerLogsWithLegacyTail(t *testing.T) { expectedContainerName := "baz" expectedTail := int64(5) setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{TailLines: &expectedTail}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{TailLines: &expectedTail}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=5") if err != nil { t.Errorf("Got error GETing: %v", err) @@ -1053,7 +1054,7 @@ func TestContainerLogsWithTailAll(t *testing.T) { expectedPodName := getPodName(podName, podNamespace) expectedContainerName := "baz" setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=all") if err != nil { t.Errorf("Got error GETing: %v", err) @@ -1079,7 +1080,7 @@ func TestContainerLogsWithInvalidTail(t *testing.T) { expectedPodName := getPodName(podName, podNamespace) expectedContainerName := "baz" setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=-1") if err != nil { t.Errorf("Got error GETing: %v", err) @@ -1099,7 +1100,7 @@ func TestContainerLogsWithFollow(t *testing.T) { expectedPodName := getPodName(podName, podNamespace) expectedContainerName := "baz" setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{Follow: true}, output) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{Follow: true}, output) resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?follow=1") if err != nil { t.Errorf("Got error GETing: %v", err) diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 3f0dc63417d..fddeaaaa8a1 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -29,7 +29,7 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/emicklei/go-restful" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/types" @@ -41,13 +41,13 @@ 
type StatsProvider interface { GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) - GetPodByName(namespace, name string) (*api.Pod, bool) - GetNode() (*api.Node, error) + GetPodByName(namespace, name string) (*v1.Pod, bool) + GetNode() (*v1.Node, error) GetNodeConfig() cm.NodeConfig ImagesFsInfo() (cadvisorapiv2.FsInfo, error) RootFsInfo() (cadvisorapiv2.FsInfo, error) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) - GetPods() []*api.Pod + GetPods() []*v1.Pod } type handler struct { @@ -197,7 +197,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful // Default parameters. params := map[string]string{ - "namespace": api.NamespaceDefault, + "namespace": v1.NamespaceDefault, "uid": "", } for k, v := range request.PathParameters() { diff --git a/pkg/kubelet/server/stats/mocks_test.go b/pkg/kubelet/server/stats/mocks_test.go index bb5d013cbd4..892dcdae99c 100644 --- a/pkg/kubelet/server/stats/mocks_test.go +++ b/pkg/kubelet/server/stats/mocks_test.go @@ -16,15 +16,15 @@ limitations under the License. package stats -import "github.com/stretchr/testify/mock" - -import cadvisorapi "github.com/google/cadvisor/info/v1" -import cadvisorapiv2 "github.com/google/cadvisor/info/v2" -import "k8s.io/kubernetes/pkg/api" -import "k8s.io/kubernetes/pkg/kubelet/cm" - -import "k8s.io/kubernetes/pkg/types" -import "k8s.io/kubernetes/pkg/volume" +import ( + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/mock" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/volume" +) // DO NOT EDIT // GENERATED BY mockery @@ -103,15 +103,15 @@ func (_m *MockStatsProvider) GetRawContainerInfo(containerName string, req *cadv } // GetPodByName provides a mock function with given fields: namespace, name -func (_m *MockStatsProvider) GetPodByName(namespace string, name string) (*api.Pod, bool) { +func (_m *MockStatsProvider) GetPodByName(namespace string, name string) (*v1.Pod, bool) { ret := _m.Called(namespace, name) - var r0 *api.Pod - if rf, ok := ret.Get(0).(func(string, string) *api.Pod); ok { + var r0 *v1.Pod + if rf, ok := ret.Get(0).(func(string, string) *v1.Pod); ok { r0 = rf(namespace, name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*api.Pod) + r0 = ret.Get(0).(*v1.Pod) } } @@ -126,15 +126,15 @@ func (_m *MockStatsProvider) GetPodByName(namespace string, name string) (*api.P } // GetNode provides a mock function with given fields: -func (_m *MockStatsProvider) GetNode() (*api.Node, error) { +func (_m *MockStatsProvider) GetNode() (*v1.Node, error) { ret := _m.Called() - var r0 *api.Node - if rf, ok := ret.Get(0).(func() *api.Node); ok { + var r0 *v1.Node + if rf, ok := ret.Get(0).(func() *v1.Node); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*api.Node) + r0 = ret.Get(0).(*v1.Node) } } @@ -228,15 +228,15 @@ func (_m *MockStatsProvider) ListVolumesForPod(podUID types.UID) (map[string]vol } // GetPods provides a mock function with given fields: -func (_m *MockStatsProvider) GetPods() []*api.Pod { +func (_m 
*MockStatsProvider) GetPods() []*v1.Pod { ret := _m.Called() - var r0 []*api.Pod - if rf, ok := ret.Get(0).(func() []*api.Pod); ok { + var r0 []*v1.Pod + if rf, ok := ret.Get(0).(func() []*v1.Pod); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*api.Pod) + r0 = ret.Get(0).([]*v1.Pod) } } diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index 9faa7ee5e6a..0ece3d2b9d9 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -21,8 +21,8 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/container" @@ -100,7 +100,7 @@ func (sp *summaryProviderImpl) Get() (*stats.Summary, error) { // summaryBuilder aggregates the datastructures provided by cadvisor into a Summary result type summaryBuilder struct { fsResourceAnalyzer fsResourceAnalyzerInterface - node *api.Node + node *v1.Node nodeConfig cm.NodeConfig rootFsInfo cadvisorapiv2.FsInfo imageFsInfo cadvisorapiv2.FsInfo diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index 7f17170112a..92c52c304bc 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -25,8 +25,8 @@ import ( fuzz "github.com/google/gofuzz" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + k8sv1 "k8s.io/kubernetes/pkg/api/v1" kubestats "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/container" @@ -54,7 +54,7 @@ var ( ) func TestBuildSummary(t *testing.T) { - node := api.Node{} + node := k8sv1.Node{} node.Name = "FooNode" nodeConfig := cm.NodeConfig{ RuntimeCgroupsName: "/docker-daemon", diff --git a/pkg/kubelet/server/stats/volume_stat_calculator.go b/pkg/kubelet/server/stats/volume_stat_calculator.go index 9eb5b3921e9..ce7ab109f52 100644 --- a/pkg/kubelet/server/stats/volume_stat_calculator.go +++ b/pkg/kubelet/server/stats/volume_stat_calculator.go @@ -21,7 +21,7 @@ import ( "sync/atomic" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/util/wait" @@ -34,7 +34,7 @@ import ( type volumeStatCalculator struct { statsProvider StatsProvider jitterPeriod time.Duration - pod *api.Pod + pod *v1.Pod stopChannel chan struct{} startO sync.Once stopO sync.Once @@ -47,7 +47,7 @@ type PodVolumeStats struct { } // newVolumeStatCalculator creates a new VolumeStatCalculator -func newVolumeStatCalculator(statsProvider StatsProvider, jitterPeriod time.Duration, pod *api.Pod) *volumeStatCalculator { +func newVolumeStatCalculator(statsProvider StatsProvider, jitterPeriod time.Duration, pod *v1.Pod) *volumeStatCalculator { return &volumeStatCalculator{ statsProvider: statsProvider, jitterPeriod: jitterPeriod, diff --git a/pkg/kubelet/status/generate.go b/pkg/kubelet/status/generate.go index aca89076e28..9826d9ac91a 100644 --- a/pkg/kubelet/status/generate.go +++ b/pkg/kubelet/status/generate.go @@ -20,24 +20,24 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // GeneratePodReadyCondition returns ready condition if all containers in a pod are ready, else it // returns an unready condition. 
-func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition { +func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition { // Find if all containers are ready or not. if containerStatuses == nil { - return api.PodCondition{ - Type: api.PodReady, - Status: api.ConditionFalse, + return v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionFalse, Reason: "UnknownContainerStatuses", } } unknownContainers := []string{} unreadyContainers := []string{} for _, container := range spec.Containers { - if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok { + if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok { if !containerStatus.Ready { unreadyContainers = append(unreadyContainers, container.Name) } @@ -47,10 +47,10 @@ func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.Contai } // If all containers are known and succeeded, just return PodCompleted. - if podPhase == api.PodSucceeded && len(unknownContainers) == 0 { - return api.PodCondition{ - Type: api.PodReady, - Status: api.ConditionFalse, + if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 { + return v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionFalse, Reason: "PodCompleted", } } @@ -64,35 +64,35 @@ func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.Contai } unreadyMessage := strings.Join(unreadyMessages, ", ") if unreadyMessage != "" { - return api.PodCondition{ - Type: api.PodReady, - Status: api.ConditionFalse, + return v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionFalse, Reason: "ContainersNotReady", Message: unreadyMessage, } } - return api.PodCondition{ - Type: api.PodReady, - Status: api.ConditionTrue, + return v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionTrue, } } // GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it // returns an uninitialized condition. -func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition { +func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition { // Find if all containers are ready or not. if containerStatuses == nil && len(spec.InitContainers) > 0 { - return api.PodCondition{ - Type: api.PodInitialized, - Status: api.ConditionFalse, + return v1.PodCondition{ + Type: v1.PodInitialized, + Status: v1.ConditionFalse, Reason: "UnknownContainerStatuses", } } unknownContainers := []string{} unreadyContainers := []string{} for _, container := range spec.InitContainers { - if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok { + if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok { if !containerStatus.Ready { unreadyContainers = append(unreadyContainers, container.Name) } @@ -102,10 +102,10 @@ func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api. } // If all init containers are known and succeeded, just return PodCompleted. 
- if podPhase == api.PodSucceeded && len(unknownContainers) == 0 { - return api.PodCondition{ - Type: api.PodInitialized, - Status: api.ConditionTrue, + if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 { + return v1.PodCondition{ + Type: v1.PodInitialized, + Status: v1.ConditionTrue, Reason: "PodCompleted", } } @@ -119,16 +119,16 @@ func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api. } unreadyMessage := strings.Join(unreadyMessages, ", ") if unreadyMessage != "" { - return api.PodCondition{ - Type: api.PodInitialized, - Status: api.ConditionFalse, + return v1.PodCondition{ + Type: v1.PodInitialized, + Status: v1.ConditionFalse, Reason: "ContainersNotInitialized", Message: unreadyMessage, } } - return api.PodCondition{ - Type: api.PodInitialized, - Status: api.ConditionTrue, + return v1.PodCondition{ + Type: v1.PodInitialized, + Status: v1.ConditionTrue, } } diff --git a/pkg/kubelet/status/generate_test.go b/pkg/kubelet/status/generate_test.go index 768e5898ae7..783c9b05344 100644 --- a/pkg/kubelet/status/generate_test.go +++ b/pkg/kubelet/status/generate_test.go @@ -20,89 +20,89 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestGeneratePodReadyCondition(t *testing.T) { tests := []struct { - spec *api.PodSpec - containerStatuses []api.ContainerStatus - podPhase api.PodPhase - expected api.PodCondition + spec *v1.PodSpec + containerStatuses []v1.ContainerStatus + podPhase v1.PodPhase + expected v1.PodCondition }{ { spec: nil, containerStatuses: nil, - podPhase: api.PodRunning, + podPhase: v1.PodRunning, expected: getReadyCondition(false, "UnknownContainerStatuses", ""), }, { - spec: &api.PodSpec{}, - containerStatuses: []api.ContainerStatus{}, - podPhase: api.PodRunning, + spec: &v1.PodSpec{}, + containerStatuses: []v1.ContainerStatus{}, + podPhase: v1.PodRunning, expected: getReadyCondition(true, "", ""), }, { - spec: &api.PodSpec{ - Containers: []api.Container{ + spec: &v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234"}, }, }, - containerStatuses: []api.ContainerStatus{}, - podPhase: api.PodRunning, + containerStatuses: []v1.ContainerStatus{}, + podPhase: v1.PodRunning, expected: getReadyCondition(false, "ContainersNotReady", "containers with unknown status: [1234]"), }, { - spec: &api.PodSpec{ - Containers: []api.Container{ + spec: &v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234"}, {Name: "5678"}, }, }, - containerStatuses: []api.ContainerStatus{ + containerStatuses: []v1.ContainerStatus{ getReadyStatus("1234"), getReadyStatus("5678"), }, - podPhase: api.PodRunning, + podPhase: v1.PodRunning, expected: getReadyCondition(true, "", ""), }, { - spec: &api.PodSpec{ - Containers: []api.Container{ + spec: &v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234"}, {Name: "5678"}, }, }, - containerStatuses: []api.ContainerStatus{ + containerStatuses: []v1.ContainerStatus{ getReadyStatus("1234"), }, - podPhase: api.PodRunning, + podPhase: v1.PodRunning, expected: getReadyCondition(false, "ContainersNotReady", "containers with unknown status: [5678]"), }, { - spec: &api.PodSpec{ - Containers: []api.Container{ + spec: &v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234"}, {Name: "5678"}, }, }, - containerStatuses: []api.ContainerStatus{ + containerStatuses: []v1.ContainerStatus{ getReadyStatus("1234"), getNotReadyStatus("5678"), }, - podPhase: api.PodRunning, + podPhase: v1.PodRunning, expected: getReadyCondition(false, "ContainersNotReady", "containers with unready status: [5678]"), 
}, { - spec: &api.PodSpec{ - Containers: []api.Container{ + spec: &v1.PodSpec{ + Containers: []v1.Container{ {Name: "1234"}, }, }, - containerStatuses: []api.ContainerStatus{ + containerStatuses: []v1.ContainerStatus{ getNotReadyStatus("1234"), }, - podPhase: api.PodSucceeded, + podPhase: v1.PodSucceeded, expected: getReadyCondition(false, "PodCompleted", ""), }, } @@ -115,28 +115,28 @@ func TestGeneratePodReadyCondition(t *testing.T) { } } -func getReadyCondition(ready bool, reason, message string) api.PodCondition { - status := api.ConditionFalse +func getReadyCondition(ready bool, reason, message string) v1.PodCondition { + status := v1.ConditionFalse if ready { - status = api.ConditionTrue + status = v1.ConditionTrue } - return api.PodCondition{ - Type: api.PodReady, + return v1.PodCondition{ + Type: v1.PodReady, Status: status, Reason: reason, Message: message, } } -func getReadyStatus(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func getReadyStatus(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, Ready: true, } } -func getNotReadyStatus(cName string) api.ContainerStatus { - return api.ContainerStatus{ +func getNotReadyStatus(cName string) v1.ContainerStatus { + return v1.ContainerStatus{ Name: cName, Ready: false, } diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 8c509e35f1f..be2ecf775f9 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -21,12 +21,14 @@ import ( "sync" "time" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -36,10 +38,10 @@ import ( "k8s.io/kubernetes/pkg/util/wait" ) -// A wrapper around api.PodStatus that includes a version to enforce that stale pod statuses are +// A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are // not sent to the API server. type versionedPodStatus struct { - status api.PodStatus + status v1.PodStatus // Monotonically increasing version number (per pod). version uint64 // Pod name & namespace, for sending updates to API server. @@ -71,11 +73,11 @@ type manager struct { type PodStatusProvider interface { // GetPodStatus returns the cached status for the provided pod UID, as well as whether it // was a cache hit. - GetPodStatus(uid types.UID) (api.PodStatus, bool) + GetPodStatus(uid types.UID) (v1.PodStatus, bool) } // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with -// the latest api.PodStatus. It also syncs updates back to the API server. +// the latest v1.PodStatus. It also syncs updates back to the API server. type Manager interface { PodStatusProvider @@ -83,7 +85,7 @@ type Manager interface { Start() // SetPodStatus caches updates the cached status for the given pod, and triggers a status update. - SetPodStatus(pod *api.Pod, status api.PodStatus) + SetPodStatus(pod *v1.Pod, status v1.PodStatus) // SetContainerReadiness updates the cached container status with the given readiness, and // triggers a status update. 
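As a usage note on the retyped Manager interface above, here is a hedged sketch of the intended call sequence, assuming NewManager keeps the (clientset.Interface, kubepod.Manager) signature shown in the hunks that follow; the example package and the startStatusManager helper are hypothetical.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	"k8s.io/kubernetes/pkg/kubelet/status"
)

// startStatusManager shows the expected order of operations: build the manager,
// start its sync loop, push a v1.PodStatus, then read the cached copy back
// through the PodStatusProvider half of the interface.
func startStatusManager(kubeClient clientset.Interface, podManager kubepod.Manager, pod *v1.Pod) {
	m := status.NewManager(kubeClient, podManager)
	m.Start()
	m.SetPodStatus(pod, v1.PodStatus{Phase: v1.PodRunning})
	if cached, ok := m.GetPodStatus(pod.UID); ok {
		_ = cached // a versioned, defensively copied v1.PodStatus
	}
}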
@@ -91,7 +93,7 @@ type Manager interface { // TerminatePod resets the container status for the provided pod to terminated and triggers // a status update. - TerminatePod(pod *api.Pod) + TerminatePod(pod *v1.Pod) // RemoveOrphanedStatuses scans the status cache and removes any entries for pods not included in // the provided podUIDs. @@ -113,8 +115,8 @@ func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Mana // isStatusEqual returns true if the given pod statuses are equal, false otherwise. // This method normalizes the status before comparing so as to make sure that meaningless // changes will be ignored. -func isStatusEqual(oldStatus, status *api.PodStatus) bool { - return api.Semantic.DeepEqual(status, oldStatus) +func isStatusEqual(oldStatus, status *v1.PodStatus) bool { + return v1.Semantic.DeepEqual(status, oldStatus) } func (m *manager) Start() { @@ -139,14 +141,14 @@ func (m *manager) Start() { }, 0) } -func (m *manager) GetPodStatus(uid types.UID) (api.PodStatus, bool) { +func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) { m.podStatusesLock.RLock() defer m.podStatusesLock.RUnlock() status, ok := m.podStatuses[m.podManager.TranslatePodUID(uid)] return status.status, ok } -func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) { +func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() // Make sure we're caching a deep copy. @@ -202,7 +204,7 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai // Update pod condition. readyConditionIndex := -1 for i, condition := range status.Conditions { - if condition.Type == api.PodReady { + if condition.Type == v1.PodReady { readyConditionIndex = i break } @@ -218,7 +220,7 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai m.updateStatusInternal(pod, status, false) } -func findContainerStatus(status *api.PodStatus, containerID string) (containerStatus *api.ContainerStatus, init bool, ok bool) { +func findContainerStatus(status *v1.PodStatus, containerID string) (containerStatus *v1.ContainerStatus, init bool, ok bool) { // Find the container to update. for i, c := range status.ContainerStatuses { if c.ContainerID == containerID { @@ -236,7 +238,7 @@ func findContainerStatus(status *api.PodStatus, containerID string) (containerSt } -func (m *manager) TerminatePod(pod *api.Pod) { +func (m *manager) TerminatePod(pod *v1.Pod) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() oldStatus := &pod.Status @@ -248,13 +250,13 @@ func (m *manager) TerminatePod(pod *api.Pod) { return } for i := range status.ContainerStatuses { - status.ContainerStatuses[i].State = api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + status.ContainerStatuses[i].State = v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, } } for i := range status.InitContainerStatuses { - status.InitContainerStatuses[i].State = api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, + status.InitContainerStatuses[i].State = v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{}, } } m.updateStatusInternal(pod, pod.Status, true) @@ -263,8 +265,8 @@ func (m *manager) TerminatePod(pod *api.Pod) { // updateStatusInternal updates the internal status cache, and queues an update to the api server if // necessary. Returns whether an update was triggered. // This method IS NOT THREAD SAFE and must be called from a locked function. 
-func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool { - var oldStatus api.PodStatus +func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate bool) bool { + var oldStatus v1.PodStatus cachedStatus, isCached := m.podStatuses[pod.UID] if isCached { oldStatus = cachedStatus.status @@ -275,10 +277,10 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, force } // Set ReadyCondition.LastTransitionTime. - if _, readyCondition := api.GetPodCondition(&status, api.PodReady); readyCondition != nil { + if _, readyCondition := v1.GetPodCondition(&status, v1.PodReady); readyCondition != nil { // Need to set LastTransitionTime. lastTransitionTime := unversioned.Now() - _, oldReadyCondition := api.GetPodCondition(&oldStatus, api.PodReady) + _, oldReadyCondition := v1.GetPodCondition(&oldStatus, v1.PodReady) if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status { lastTransitionTime = oldReadyCondition.LastTransitionTime } @@ -286,10 +288,10 @@ func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, force } // Set InitializedCondition.LastTransitionTime. - if _, initCondition := api.GetPodCondition(&status, api.PodInitialized); initCondition != nil { + if _, initCondition := v1.GetPodCondition(&status, v1.PodInitialized); initCondition != nil { // Need to set LastTransitionTime. lastTransitionTime := unversioned.Now() - _, oldInitCondition := api.GetPodCondition(&oldStatus, api.PodInitialized) + _, oldInitCondition := v1.GetPodCondition(&oldStatus, v1.PodInitialized) if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status { lastTransitionTime = oldInitCondition.LastTransitionTime } @@ -419,6 +421,9 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { return } pod.Status = status.status + if err := podutil.SetInitContainersStatusesAnnotations(pod); err != nil { + glog.Error(err) + } // TODO: handle conflict as a retry, make that easier too. pod, err = m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod) if err == nil { @@ -435,10 +440,9 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) return } - deleteOptions := api.NewDeleteOptions(0) + deleteOptions := v1.NewDeleteOptions(0) // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. - deleteOptions.Preconditions = api.NewUIDPreconditions(string(pod.UID)) - glog.V(2).Infof("Removing Pod %q from etcd", format.Pod(pod)) + deleteOptions.Preconditions = v1.NewUIDPreconditions(string(pod.UID)) if err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions); err == nil { glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) m.deletePodStatus(uid) @@ -467,7 +471,7 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool { // now the pod manager only supports getting mirror pod by static pod, so we have to pass // static pod uid here. // TODO(random-liu): Simplify the logic when mirror pod manager is added. -func (m *manager) needsReconcile(uid types.UID, status api.PodStatus) bool { +func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // The pod could be a static pod, so we should translate first. 
pod, ok := m.podManager.GetPodByUID(uid) if !ok { @@ -508,11 +512,11 @@ func (m *manager) needsReconcile(uid types.UID, status api.PodStatus) bool { // In fact, the best way to solve this is to do it on api side. However, for now, we normalize the status locally in // kubelet temporarily. // TODO(random-liu): Remove timestamp related logic after apiserver supports nanosecond or makes it consistent. -func normalizeStatus(pod *api.Pod, status *api.PodStatus) *api.PodStatus { +func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { normalizeTimeStamp := func(t *unversioned.Time) { *t = t.Rfc3339Copy() } - normalizeContainerState := func(c *api.ContainerState) { + normalizeContainerState := func(c *v1.ContainerState) { if c.Running != nil { normalizeTimeStamp(&c.Running.StartedAt) } @@ -553,7 +557,7 @@ func normalizeStatus(pod *api.Pod, status *api.PodStatus) *api.PodStatus { // notRunning returns true if every status is terminated or waiting, or the status list // is empty. -func notRunning(statuses []api.ContainerStatus) bool { +func notRunning(statuses []v1.ContainerStatus) bool { for _, status := range statuses { if status.State.Terminated == nil && status.State.Waiting == nil { return false @@ -562,12 +566,12 @@ func notRunning(statuses []api.ContainerStatus) bool { return true } -func copyStatus(source *api.PodStatus) (api.PodStatus, error) { +func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) { clone, err := api.Scheme.DeepCopy(source) if err != nil { glog.Errorf("Failed to clone status %+v: %v", source, err) - return api.PodStatus{}, err + return v1.PodStatus{}, err } - status := *clone.(*api.PodStatus) + status := *clone.(*v1.PodStatus) return status, nil } diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 1587eea4570..3394a53cbe3 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -23,8 +23,8 @@ import ( "testing" "time" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/testing/core" "github.com/stretchr/testify/assert" @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" @@ -40,9 +41,9 @@ import ( ) // Generate new instance of test pod with the same initial value. 
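The syncPod hunk above switches the terminal-pod cleanup to v1 helpers; the following sketch restates that delete pattern in isolation. The deleteTerminatedPod name and example package are hypothetical, while the calls themselves (NewDeleteOptions, NewUIDPreconditions, Core().Pods().Delete) are taken from the hunk.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// deleteTerminatedPod deletes with a zero grace period and a UID precondition,
// so a recreated pod with the same name and namespace is not removed by mistake.
func deleteTerminatedPod(c clientset.Interface, pod *v1.Pod) error {
	opts := v1.NewDeleteOptions(0)
	opts.Preconditions = v1.NewUIDPreconditions(string(pod.UID))
	return c.Core().Pods(pod.Namespace).Delete(pod.Name, opts)
}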
-func getTestPod() *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ +func getTestPod() *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", @@ -78,8 +79,8 @@ func generateRandomMessage() string { return strconv.Itoa(rand.Int()) } -func getRandomPodStatus() api.PodStatus { - return api.PodStatus{ +func getRandomPodStatus() v1.PodStatus { + return v1.PodStatus{ Message: generateRandomMessage(), } } @@ -135,13 +136,13 @@ func TestNewStatus(t *testing.T) { func TestNewStatusPreservesPodStartTime(t *testing.T) { syncer := newTestManager(&fake.Clientset{}) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Status: api.PodStatus{}, + Status: v1.PodStatus{}, } now := unversioned.Now() startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) @@ -154,12 +155,12 @@ func TestNewStatusPreservesPodStartTime(t *testing.T) { } } -func getReadyPodStatus() api.PodStatus { - return api.PodStatus{ - Conditions: []api.PodCondition{ +func getReadyPodStatus() v1.PodStatus { + return v1.PodStatus{ + Conditions: []v1.PodCondition{ { - Type: api.PodReady, - Status: api.ConditionTrue, + Type: v1.PodReady, + Status: v1.ConditionTrue, }, }, } @@ -168,18 +169,18 @@ func getReadyPodStatus() api.PodStatus { func TestNewStatusSetsReadyTransitionTime(t *testing.T) { syncer := newTestManager(&fake.Clientset{}) podStatus := getReadyPodStatus() - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Status: api.PodStatus{}, + Status: v1.PodStatus{}, } syncer.SetPodStatus(pod, podStatus) verifyUpdates(t, syncer, 1) status := expectPodStatus(t, syncer, pod) - readyCondition := api.GetPodReadyCondition(status) + readyCondition := v1.GetPodReadyCondition(status) if readyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } @@ -215,25 +216,25 @@ func TestChangedStatusKeepsStartTime(t *testing.T) { func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) { syncer := newTestManager(&fake.Clientset{}) podStatus := getReadyPodStatus() - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Status: api.PodStatus{}, + Status: v1.PodStatus{}, } syncer.SetPodStatus(pod, podStatus) verifyUpdates(t, syncer, 1) oldStatus := expectPodStatus(t, syncer, pod) anotherStatus := getReadyPodStatus() - anotherStatus.Conditions[0].Status = api.ConditionFalse + anotherStatus.Conditions[0].Status = v1.ConditionFalse syncer.SetPodStatus(pod, anotherStatus) verifyUpdates(t, syncer, 1) newStatus := expectPodStatus(t, syncer, pod) - oldReadyCondition := api.GetPodReadyCondition(oldStatus) - newReadyCondition := api.GetPodReadyCondition(newStatus) + oldReadyCondition := v1.GetPodReadyCondition(oldStatus) + newReadyCondition := v1.GetPodReadyCondition(newStatus) if newReadyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } @@ -254,13 +255,13 @@ func TestUnchangedStatus(t *testing.T) { func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) { syncer := newTestManager(&fake.Clientset{}) podStatus := getReadyPodStatus() - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, - Status: api.PodStatus{}, + Status: v1.PodStatus{}, 
} syncer.SetPodStatus(pod, podStatus) verifyUpdates(t, syncer, 1) @@ -271,8 +272,8 @@ func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) { verifyUpdates(t, syncer, 0) newStatus := expectPodStatus(t, syncer, pod) - oldReadyCondition := api.GetPodReadyCondition(oldStatus) - newReadyCondition := api.GetPodReadyCondition(newStatus) + oldReadyCondition := v1.GetPodReadyCondition(oldStatus) + newReadyCondition := v1.GetPodReadyCondition(newStatus) if newReadyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } @@ -330,21 +331,21 @@ func TestSyncBatchNoDeadlock(t *testing.T) { pod := getTestPod() // Setup fake client. - var ret api.Pod + var ret v1.Pod var err error client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) { switch action := action.(type) { case core.GetAction: assert.Equal(t, pod.Name, action.GetName(), "Unexpeted GetAction: %+v", action) case core.UpdateAction: - assert.Equal(t, pod.Name, action.GetObject().(*api.Pod).Name, "Unexpeted UpdateAction: %+v", action) + assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpeted UpdateAction: %+v", action) default: assert.Fail(t, "Unexpected Action: %+v", action) } return true, &ret, err }) - pod.Status.ContainerStatuses = []api.ContainerStatus{{State: api.ContainerState{Running: &api.ContainerStateRunning{}}}} + pod.Status.ContainerStatuses = []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}} getAction := core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}} updateAction := core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} @@ -381,7 +382,7 @@ func TestSyncBatchNoDeadlock(t *testing.T) { // Pod is terminated successfully. pod.Status.ContainerStatuses[0].State.Running = nil - pod.Status.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{} + pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{} m.SetPodStatus(pod, getRandomPodStatus()) m.testSyncBatch() verifyActions(t, client, []core.Action{getAction, updateAction}) @@ -400,7 +401,7 @@ func TestStaleUpdates(t *testing.T) { client := fake.NewSimpleClientset(pod) m := newTestManager(client) - status := api.PodStatus{Message: "initial status"} + status := v1.PodStatus{Message: "initial status"} m.SetPodStatus(pod, status) status.Message = "first version bump" m.SetPodStatus(pod, status) @@ -441,10 +442,10 @@ func TestStaleUpdates(t *testing.T) { } // shuffle returns a new shuffled list of container statuses. 
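TestSyncBatchNoDeadlock above relies on the fake clientset's reactor hook; the sketch below isolates that pattern. The newCannedPodClient helper and example package are hypothetical, and the k8s.io/kubernetes/pkg/runtime import path is an assumption for this release line.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

// newCannedPodClient intercepts every verb on the "pods" resource and answers
// with a fixed object, keeping the status manager's get/update round trips in-process.
func newCannedPodClient(canned *v1.Pod) *fake.Clientset {
	client := fake.NewSimpleClientset()
	client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, canned, nil
	})
	return client
}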
-func shuffle(statuses []api.ContainerStatus) []api.ContainerStatus { +func shuffle(statuses []v1.ContainerStatus) []v1.ContainerStatus { numStatuses := len(statuses) randIndexes := rand.Perm(numStatuses) - shuffled := make([]api.ContainerStatus, numStatuses) + shuffled := make([]v1.ContainerStatus, numStatuses) for i := 0; i < numStatuses; i++ { shuffled[i] = statuses[randIndexes[i]] } @@ -452,21 +453,21 @@ func shuffle(statuses []api.ContainerStatus) []api.ContainerStatus { } func TestStatusEquality(t *testing.T) { - pod := api.Pod{ - Spec: api.PodSpec{}, + pod := v1.Pod{ + Spec: v1.PodSpec{}, } - containerStatus := []api.ContainerStatus{} + containerStatus := []v1.ContainerStatus{} for i := 0; i < 10; i++ { - s := api.ContainerStatus{ + s := v1.ContainerStatus{ Name: fmt.Sprintf("container%d", i), } containerStatus = append(containerStatus, s) } - podStatus := api.PodStatus{ + podStatus := v1.PodStatus{ ContainerStatuses: containerStatus, } for i := 0; i < 10; i++ { - oldPodStatus := api.PodStatus{ + oldPodStatus := v1.PodStatus{ ContainerStatuses: shuffle(podStatus.ContainerStatuses), } normalizeStatus(&pod, &oldPodStatus) @@ -524,7 +525,7 @@ func TestStaticPod(t *testing.T) { core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, }) updateAction := client.Actions()[1].(core.UpdateActionImpl) - updatedPod := updateAction.Object.(*api.Pod) + updatedPod := updateAction.Object.(*v1.Pod) assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID) assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status) client.ClearActions() @@ -536,7 +537,7 @@ func TestStaticPod(t *testing.T) { // Change mirror pod identity. m.podManager.DeletePod(mirrorPod) mirrorPod.UID = "new-mirror-pod" - mirrorPod.Status = api.PodStatus{} + mirrorPod.Status = v1.PodStatus{} m.podManager.AddPod(mirrorPod) // Should not update to mirror pod, because UID has changed. @@ -549,7 +550,7 @@ func TestStaticPod(t *testing.T) { func TestSetContainerReadiness(t *testing.T) { cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"} cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"} - containerStatuses := []api.ContainerStatus{ + containerStatuses := []v1.ContainerStatus{ { Name: "c1", ContainerID: cID1.String(), @@ -560,18 +561,18 @@ func TestSetContainerReadiness(t *testing.T) { Ready: false, }, } - status := api.PodStatus{ + status := v1.PodStatus{ ContainerStatuses: containerStatuses, - Conditions: []api.PodCondition{{ - Type: api.PodReady, - Status: api.ConditionFalse, + Conditions: []v1.PodCondition{{ + Type: v1.PodReady, + Status: v1.ConditionFalse, }}, } pod := getTestPod() - pod.Spec.Containers = []api.Container{{Name: "c1"}, {Name: "c2"}} + pod.Spec.Containers = []v1.Container{{Name: "c1"}, {Name: "c2"}} // Verify expected readiness of containers & pod. 
- verifyReadiness := func(step string, status *api.PodStatus, c1Ready, c2Ready, podReady bool) { + verifyReadiness := func(step string, status *v1.PodStatus, c1Ready, c2Ready, podReady bool) { for _, c := range status.ContainerStatuses { switch c.ContainerID { case cID1.String(): @@ -586,9 +587,9 @@ func TestSetContainerReadiness(t *testing.T) { t.Fatalf("[%s] Unexpected container: %+v", step, c) } } - if status.Conditions[0].Type != api.PodReady { + if status.Conditions[0].Type != v1.PodReady { t.Fatalf("[%s] Unexpected condition: %+v", step, status.Conditions[0]) - } else if ready := (status.Conditions[0].Status == api.ConditionTrue); ready != podReady { + } else if ready := (status.Conditions[0].Status == v1.ConditionTrue); ready != podReady { t.Errorf("[%s] Expected readiness of pod to be %v but was %v", step, podReady, ready) } } @@ -727,7 +728,7 @@ func TestReconcilePodStatus(t *testing.T) { }) } -func expectPodStatus(t *testing.T, m *manager, pod *api.Pod) api.PodStatus { +func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus { status, ok := m.GetPodStatus(pod.UID) if !ok { t.Fatalf("Expected PodStatus for %q not found", pod.UID) diff --git a/pkg/kubelet/sysctl/runtime.go b/pkg/kubelet/sysctl/runtime.go index b72753e6932..727919021ed 100644 --- a/pkg/kubelet/sysctl/runtime.go +++ b/pkg/kubelet/sysctl/runtime.go @@ -19,7 +19,7 @@ package sysctl import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -81,7 +81,7 @@ func NewRuntimeAdmitHandler(runtime container.Runtime) (*runtimeAdmitHandler, er // Admit checks whether the runtime supports sysctls. func (w *runtimeAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { - sysctls, unsafeSysctls, err := api.SysctlsFromPodAnnotations(attrs.Pod.Annotations) + sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(attrs.Pod.Annotations) if err != nil { return lifecycle.PodAdmitResult{ Admit: false, diff --git a/pkg/kubelet/sysctl/whitelist.go b/pkg/kubelet/sysctl/whitelist.go index 5a5b608d917..7626a3f5d3b 100644 --- a/pkg/kubelet/sysctl/whitelist.go +++ b/pkg/kubelet/sysctl/whitelist.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/validation" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -47,9 +47,9 @@ func SafeSysctlWhitelist() []string { // Whitelist provides a list of allowed sysctls and sysctl patterns (ending in *) // and a function to check whether a given sysctl matches this list. type Whitelist interface { - // Validate checks that all sysctls given in a api.SysctlsPodAnnotationKey annotation + // Validate checks that all sysctls given in a v1.SysctlsPodAnnotationKey annotation // are valid according to the whitelist. - Validate(pod *api.Pod) error + Validate(pod *v1.Pod) error } // patternWhitelist takes a list of sysctls or sysctl patterns (ending in *) and @@ -129,7 +129,7 @@ func (w *patternWhitelist) validateSysctl(sysctl string, hostNet, hostIPC bool) return fmt.Errorf("%q not whitelisted", sysctl) } -// Admit checks that all sysctls given in a api.SysctlsPodAnnotationKey annotation +// Admit checks that all sysctls given in a v1.SysctlsPodAnnotationKey annotation // are valid according to the whitelist. 
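Whitelist construction now keys off the v1 annotation constant; here is a small sketch mirroring the whitelist test changes nearby. The buildSysctlWhitelist helper and example package are hypothetical, and the sysctl import path is inferred from the file paths in this diff.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/sysctl"
)

// buildSysctlWhitelist combines the safe defaults with extra patterns and keys
// the result by the v1 sysctls annotation, as the tests below do.
func buildSysctlWhitelist(extra ...string) error {
	_, err := sysctl.NewWhitelist(append(sysctl.SafeSysctlWhitelist(), extra...), v1.SysctlsPodAnnotationKey)
	return err
}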
func (w *patternWhitelist) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { pod := attrs.Pod @@ -140,7 +140,7 @@ func (w *patternWhitelist) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle. } } - sysctls, err := api.SysctlsFromPodAnnotation(a) + sysctls, err := v1.SysctlsFromPodAnnotation(a) if err != nil { return lifecycle.PodAdmitResult{ Admit: false, @@ -151,8 +151,8 @@ func (w *patternWhitelist) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle. var hostNet, hostIPC bool if pod.Spec.SecurityContext != nil { - hostNet = pod.Spec.SecurityContext.HostNetwork - hostIPC = pod.Spec.SecurityContext.HostIPC + hostNet = pod.Spec.HostNetwork + hostIPC = pod.Spec.HostIPC } for _, s := range sysctls { if err := w.validateSysctl(s.Name, hostNet, hostIPC); err != nil { diff --git a/pkg/kubelet/sysctl/whitelist_test.go b/pkg/kubelet/sysctl/whitelist_test.go index 27d3728e099..ea6a54c78b7 100644 --- a/pkg/kubelet/sysctl/whitelist_test.go +++ b/pkg/kubelet/sysctl/whitelist_test.go @@ -19,7 +19,7 @@ package sysctl import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestNewWhitelist(t *testing.T) { @@ -35,7 +35,7 @@ func TestNewWhitelist(t *testing.T) { {sysctls: []string{"net.*.foo"}, err: true}, {sysctls: []string{"foo"}, err: true}, } { - _, err := NewWhitelist(append(SafeSysctlWhitelist(), test.sysctls...), api.SysctlsPodAnnotationKey) + _, err := NewWhitelist(append(SafeSysctlWhitelist(), test.sysctls...), v1.SysctlsPodAnnotationKey) if test.err && err == nil { t.Errorf("expected an error creating a whitelist for %v", test.sysctls) } else if !test.err && err != nil { @@ -65,7 +65,7 @@ func TestWhitelist(t *testing.T) { {sysctl: "kernel.sem", hostIPC: true}, } - w, err := NewWhitelist(append(SafeSysctlWhitelist(), "kernel.msg*", "kernel.sem"), api.SysctlsPodAnnotationKey) + w, err := NewWhitelist(append(SafeSysctlWhitelist(), "kernel.msg*", "kernel.sem"), v1.SysctlsPodAnnotationKey) if err != nil { t.Fatalf("failed to create whitelist: %v", err) } diff --git a/pkg/kubelet/types/pod_update.go b/pkg/kubelet/types/pod_update.go index e98489df6b1..e560a9b91f6 100644 --- a/pkg/kubelet/types/pod_update.go +++ b/pkg/kubelet/types/pod_update.go @@ -19,7 +19,7 @@ package types import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) const ConfigSourceAnnotationKey = "kubernetes.io/config.source" @@ -55,7 +55,7 @@ const ( // Updates from all sources AllSource = "*" - NamespaceDefault = api.NamespaceDefault + NamespaceDefault = v1.NamespaceDefault ) // PodUpdate defines an operation sent on the channel. You can add or remove single services by @@ -68,7 +68,7 @@ const ( // functionally similar, this helps our unit tests properly check that the correct PodUpdates // are generated. type PodUpdate struct { - Pods []*api.Pod + Pods []*v1.Pod Op PodOperation Source string } @@ -93,7 +93,7 @@ func GetValidatedSources(sources []string) ([]string, error) { } // GetPodSource returns the source of the pod based on the annotation. 
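GetPodSource, retyped below to accept a *v1.Pod, is what gates the host-namespace checks in pkg/kubelet/util.go later in this diff. A hedged sketch of a typical caller follows; isApiserverPod is hypothetical, and ApiserverSource is assumed to come from the same constants block as the AllSource constant shown above.

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// isApiserverPod reports whether the pod's config-source annotation says it was
// delivered by the API server rather than a file or HTTP source.
func isApiserverPod(pod *v1.Pod) (bool, error) {
	src, err := kubetypes.GetPodSource(pod)
	if err != nil {
		return false, err
	}
	return src == kubetypes.ApiserverSource, nil
}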
-func GetPodSource(pod *api.Pod) (string, error) { +func GetPodSource(pod *v1.Pod) (string, error) { if pod.Annotations != nil { if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok { return source, nil diff --git a/pkg/kubelet/types/types.go b/pkg/kubelet/types/types.go index 017c3c8cb31..35359c7aa6b 100644 --- a/pkg/kubelet/types/types.go +++ b/pkg/kubelet/types/types.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // TODO: Reconcile custom types in kubelet/types and this subpackage @@ -59,7 +59,7 @@ func (t *Timestamp) GetString() string { } // A type to help sort container statuses based on container names. -type SortedContainerStatuses []api.ContainerStatus +type SortedContainerStatuses []v1.ContainerStatus func (s SortedContainerStatuses) Len() int { return len(s) } func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] } @@ -70,7 +70,7 @@ func (s SortedContainerStatuses) Less(i, j int) bool { // SortInitContainerStatuses ensures that statuses are in the order that their // init container appears in the pod spec -func SortInitContainerStatuses(p *api.Pod, statuses []api.ContainerStatus) { +func SortInitContainerStatuses(p *v1.Pod, statuses []v1.ContainerStatus) { containers := p.Spec.InitContainers current := 0 for _, container := range containers { @@ -87,7 +87,7 @@ func SortInitContainerStatuses(p *api.Pod, statuses []api.ContainerStatus) { // Reservation represents reserved resources for non-pod components. type Reservation struct { // System represents resources reserved for non-kubernetes components. - System api.ResourceList + System v1.ResourceList // Kubernetes represents resources reserved for kubernetes system components. - Kubernetes api.ResourceList + Kubernetes v1.ResourceList } diff --git a/pkg/kubelet/types/types_test.go b/pkg/kubelet/types/types_test.go index b7476b81269..9bb9acbea8d 100644 --- a/pkg/kubelet/types/types_test.go +++ b/pkg/kubelet/types/types_test.go @@ -20,37 +20,37 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) func TestSortInitContainerStatuses(t *testing.T) { - pod := api.Pod{ - Spec: api.PodSpec{}, + pod := v1.Pod{ + Spec: v1.PodSpec{}, } var cases = []struct { - containers []api.Container - statuses []api.ContainerStatus - sortedStatuses []api.ContainerStatus + containers []v1.Container + statuses []v1.ContainerStatus + sortedStatuses []v1.ContainerStatus }{ { - containers: []api.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, - statuses: []api.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, - sortedStatuses: []api.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + containers: []v1.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + statuses: []v1.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + sortedStatuses: []v1.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, }, { - containers: []api.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, - statuses: []api.ContainerStatus{{Name: "second"}, {Name: "first"}, {Name: "fourth"}, {Name: "third"}}, - sortedStatuses: []api.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + containers: []v1.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + statuses: 
[]v1.ContainerStatus{{Name: "second"}, {Name: "first"}, {Name: "fourth"}, {Name: "third"}}, + sortedStatuses: []v1.ContainerStatus{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, }, { - containers: []api.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, - statuses: []api.ContainerStatus{{Name: "fourth"}, {Name: "first"}}, - sortedStatuses: []api.ContainerStatus{{Name: "first"}, {Name: "fourth"}}, + containers: []v1.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + statuses: []v1.ContainerStatus{{Name: "fourth"}, {Name: "first"}}, + sortedStatuses: []v1.ContainerStatus{{Name: "first"}, {Name: "fourth"}}, }, { - containers: []api.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, - statuses: []api.ContainerStatus{{Name: "first"}, {Name: "third"}}, - sortedStatuses: []api.ContainerStatus{{Name: "first"}, {Name: "third"}}, + containers: []v1.Container{{Name: "first"}, {Name: "second"}, {Name: "third"}, {Name: "fourth"}}, + statuses: []v1.ContainerStatus{{Name: "first"}, {Name: "third"}}, + sortedStatuses: []v1.ContainerStatus{{Name: "first"}, {Name: "third"}}, }, } for _, data := range cases { diff --git a/pkg/kubelet/util.go b/pkg/kubelet/util.go index 20afbf30099..f58b2beecc4 100644 --- a/pkg/kubelet/util.go +++ b/pkg/kubelet/util.go @@ -20,14 +20,14 @@ import ( "fmt" "os" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/capabilities" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/securitycontext" ) // Check whether we have the capabilities to run the specified pod. -func canRunPod(pod *api.Pod) error { +func canRunPod(pod *v1.Pod) error { if !capabilities.Get().AllowPrivileged { for _, container := range pod.Spec.Containers { if securitycontext.HasPrivilegedRequest(&container) { @@ -41,11 +41,7 @@ func canRunPod(pod *api.Pod) error { } } - if pod.Spec.SecurityContext == nil { - return nil - } - - if pod.Spec.SecurityContext.HostNetwork { + if pod.Spec.HostNetwork { allowed, err := allowHostNetwork(pod) if err != nil { return err @@ -55,7 +51,7 @@ func canRunPod(pod *api.Pod) error { } } - if pod.Spec.SecurityContext.HostPID { + if pod.Spec.HostPID { allowed, err := allowHostPID(pod) if err != nil { return err @@ -65,7 +61,7 @@ func canRunPod(pod *api.Pod) error { } } - if pod.Spec.SecurityContext.HostIPC { + if pod.Spec.HostIPC { allowed, err := allowHostIPC(pod) if err != nil { return err @@ -79,7 +75,7 @@ func canRunPod(pod *api.Pod) error { } // Determined whether the specified pod is allowed to use host networking -func allowHostNetwork(pod *api.Pod) (bool, error) { +func allowHostNetwork(pod *v1.Pod) (bool, error) { podSource, err := kubetypes.GetPodSource(pod) if err != nil { return false, err @@ -93,7 +89,7 @@ func allowHostNetwork(pod *api.Pod) (bool, error) { } // Determined whether the specified pod is allowed to use host networking -func allowHostPID(pod *api.Pod) (bool, error) { +func allowHostPID(pod *v1.Pod) (bool, error) { podSource, err := kubetypes.GetPodSource(pod) if err != nil { return false, err @@ -107,7 +103,7 @@ func allowHostPID(pod *api.Pod) (bool, error) { } // Determined whether the specified pod is allowed to use host ipc -func allowHostIPC(pod *api.Pod) (bool, error) { +func allowHostIPC(pod *v1.Pod) (bool, error) { podSource, err := kubetypes.GetPodSource(pod) if err != nil { return false, err diff --git a/pkg/kubelet/util/csr/csr.go b/pkg/kubelet/util/csr/csr.go index 
e68576fe093..1fc43ff4795 100644 --- a/pkg/kubelet/util/csr/csr.go +++ b/pkg/kubelet/util/csr/csr.go @@ -20,10 +20,10 @@ import ( "crypto/x509/pkix" "fmt" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/certificates" - unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" + "k8s.io/kubernetes/pkg/api/v1" + certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1" + unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/types" certutil "k8s.io/kubernetes/pkg/util/cert" @@ -52,7 +52,7 @@ func RequestNodeCertificate(client unversionedcertificates.CertificateSigningReq req, err := client.Create(&certificates.CertificateSigningRequest{ // Username, UID, Groups will be injected by API server. TypeMeta: unversioned.TypeMeta{Kind: "CertificateSigningRequest"}, - ObjectMeta: api.ObjectMeta{GenerateName: "csr-"}, + ObjectMeta: v1.ObjectMeta{GenerateName: "csr-"}, // TODO: For now, this is a request for a certificate with allowed usage of "TLS Web Client Authentication". // Need to figure out whether/how to surface the allowed usage in the spec. @@ -65,10 +65,10 @@ func RequestNodeCertificate(client unversionedcertificates.CertificateSigningReq // Make a default timeout = 3600s. var defaultTimeoutSeconds int64 = 3600 - resultCh, err := client.Watch(api.ListOptions{ + resultCh, err := client.Watch(v1.ListOptions{ Watch: true, TimeoutSeconds: &defaultTimeoutSeconds, - FieldSelector: fields.OneTermEqualSelector("metadata.name", req.Name), + FieldSelector: fields.OneTermEqualSelector("metadata.name", req.Name).String(), }) if err != nil { return nil, fmt.Errorf("cannot watch on the certificate signing request: %v", err) diff --git a/pkg/kubelet/util/format/pod.go b/pkg/kubelet/util/format/pod.go index 13bfc764430..16092f063e2 100644 --- a/pkg/kubelet/util/format/pod.go +++ b/pkg/kubelet/util/format/pod.go @@ -21,15 +21,15 @@ import ( "strings" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" ) -type podHandler func(*api.Pod) string +type podHandler func(*v1.Pod) string // Pod returns a string representing a pod in a consistent human readable format, // with pod UID as part of the string. -func Pod(pod *api.Pod) string { +func Pod(pod *v1.Pod) string { return PodDesc(pod.Name, pod.Namespace, pod.UID) } @@ -43,7 +43,7 @@ func PodDesc(podName, podNamespace string, podUID types.UID) string { // PodWithDeletionTimestamp is the same as Pod. In addition, it prints the // deletion timestamp of the pod if it's not nil. -func PodWithDeletionTimestamp(pod *api.Pod) string { +func PodWithDeletionTimestamp(pod *v1.Pod) string { var deletionTimestamp string if pod.DeletionTimestamp != nil { deletionTimestamp = ":DeletionTimestamp=" + pod.DeletionTimestamp.UTC().Format(time.RFC3339) @@ -53,17 +53,17 @@ func PodWithDeletionTimestamp(pod *api.Pod) string { // Pods returns a string representating a list of pods in a human // readable format. -func Pods(pods []*api.Pod) string { +func Pods(pods []*v1.Pod) string { return aggregatePods(pods, Pod) } // PodsWithDeletiontimestamps is the same as Pods. In addition, it prints the // deletion timestamps of the pods if they are not nil. 
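The format helpers in this file now take v1 pods; a brief usage sketch of the two call shapes follows. The logPodEvent helper and example package are hypothetical; the format.Pod and format.Pods signatures are as shown in the hunks here.

package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
)

// logPodEvent emits the consistent, human-readable pod identifiers (name,
// namespace, UID) that these helpers produce for kubelet log lines.
func logPodEvent(pod *v1.Pod, pods []*v1.Pod) {
	glog.V(3).Infof("syncing %s", format.Pod(pod))
	glog.V(4).Infof("admitting %s", format.Pods(pods))
}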
-func PodsWithDeletiontimestamps(pods []*api.Pod) string { +func PodsWithDeletiontimestamps(pods []*v1.Pod) string { return aggregatePods(pods, PodWithDeletionTimestamp) } -func aggregatePods(pods []*api.Pod, handler podHandler) string { +func aggregatePods(pods []*v1.Pod, handler podHandler) string { podStrings := make([]string, 0, len(pods)) for _, pod := range pods { podStrings = append(podStrings, handler(pod)) diff --git a/pkg/kubelet/util/format/resources.go b/pkg/kubelet/util/format/resources.go index f37c4335604..bc50bc9c695 100644 --- a/pkg/kubelet/util/format/resources.go +++ b/pkg/kubelet/util/format/resources.go @@ -21,11 +21,11 @@ import ( "sort" "strings" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // ResourceList returns a string representation of a resource list in a human readable format. -func ResourceList(resources api.ResourceList) string { +func ResourceList(resources v1.ResourceList) string { resourceStrings := make([]string, 0, len(resources)) for key, value := range resources { resourceStrings = append(resourceStrings, fmt.Sprintf("%v=%v", key, value.String())) diff --git a/pkg/kubelet/util/format/resources_test.go b/pkg/kubelet/util/format/resources_test.go index 177229c116f..2ff0833c14d 100644 --- a/pkg/kubelet/util/format/resources_test.go +++ b/pkg/kubelet/util/format/resources_test.go @@ -19,14 +19,14 @@ package format import ( "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" ) func TestResourceList(t *testing.T) { - resourceList := api.ResourceList{} - resourceList[api.ResourceCPU] = resource.MustParse("100m") - resourceList[api.ResourceMemory] = resource.MustParse("5Gi") + resourceList := v1.ResourceList{} + resourceList[v1.ResourceCPU] = resource.MustParse("100m") + resourceList[v1.ResourceMemory] = resource.MustParse("5Gi") actual := ResourceList(resourceList) expected := "cpu=100m,memory=5Gi" if actual != expected { diff --git a/pkg/kubelet/util/sliceutils/sliceutils.go b/pkg/kubelet/util/sliceutils/sliceutils.go index 55448f17ffd..285cfeb44a3 100644 --- a/pkg/kubelet/util/sliceutils/sliceutils.go +++ b/pkg/kubelet/util/sliceutils/sliceutils.go @@ -17,7 +17,7 @@ limitations under the License. package sliceutils import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -33,7 +33,7 @@ func StringInSlice(s string, list []string) bool { // PodsByCreationTime makes an array of pods sortable by their creation // timestamps in ascending order. 
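PodsByCreationTime below is a plain sort wrapper over []*v1.Pod; a one-function sketch of how it is meant to be consumed. oldestFirst and the example package are hypothetical; the Swap and Less methods are not visible in this hunk and are assumed to follow the doc comment (ascending by creation time).

package example

import (
	"sort"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
)

// oldestFirst sorts pods in place so the earliest-created pod comes first.
func oldestFirst(pods []*v1.Pod) []*v1.Pod {
	sort.Sort(sliceutils.PodsByCreationTime(pods))
	return pods
}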
-type PodsByCreationTime []*api.Pod +type PodsByCreationTime []*v1.Pod func (s PodsByCreationTime) Len() int { return len(s) diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index 9d017b82349..1a86487ffaa 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -20,8 +20,8 @@ import ( "fmt" "net" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/io" @@ -72,14 +72,14 @@ func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName strin return kvh.kubelet.getPodPluginDir(podUID, pluginName) } -func (kvh *kubeletVolumeHost) GetKubeClient() internalclientset.Interface { +func (kvh *kubeletVolumeHost) GetKubeClient() clientset.Interface { return kvh.kubelet.kubeClient } func (kvh *kubeletVolumeHost) NewWrapperMounter( volName string, spec volume.Spec, - pod *api.Pod, + pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}" wrapperVolumeName := "wrapped_" + volName @@ -125,7 +125,7 @@ func (kvh *kubeletVolumeHost) GetHostIP() (net.IP, error) { return kvh.kubelet.GetHostIP() } -func (kvh *kubeletVolumeHost) GetNodeAllocatable() (api.ResourceList, error) { +func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) { node, err := kvh.kubelet.getNodeAnyWay() if err != nil { return nil, fmt.Errorf("error retrieving node: %v", err) diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index b29cfec8071..e0376ac3fbd 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -26,7 +26,7 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" @@ -58,7 +58,7 @@ type ActualStateOfWorld interface { // volume, this is a no-op. // If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. - AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName api.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error + AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error // MarkRemountRequired marks each volume that is successfully attached and // mounted for the specified pod as requiring remount (if the plugin for the @@ -73,7 +73,7 @@ type ActualStateOfWorld interface { // must unmounted prior to detach. // If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. - SetVolumeGloballyMounted(volumeName api.UniqueVolumeName, globallyMounted bool) error + SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool) error // DeletePodFromVolume removes the given pod from the given volume in the // cache indicating the volume has been successfully unmounted from the pod. @@ -81,7 +81,7 @@ type ActualStateOfWorld interface { // volume, this is a no-op. 
// If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. - DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error + DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error // DeleteVolume removes the given volume from the list of attached volumes // in the cache indicating the volume has been successfully detached from @@ -90,7 +90,7 @@ type ActualStateOfWorld interface { // attached volumes, this is a no-op. // If a volume with the name volumeName exists and its list of mountedPods // is not empty, an error is returned. - DeleteVolume(volumeName api.UniqueVolumeName) error + DeleteVolume(volumeName v1.UniqueVolumeName) error // PodExistsInVolume returns true if the given pod exists in the list of // mountedPods for the given volume in the cache, indicating that the volume @@ -107,12 +107,12 @@ type ActualStateOfWorld interface { // volumes, depend on this to update the contents of the volume. // All volume mounting calls should be idempotent so a second mount call for // volumes that do not need to update contents should not fail. - PodExistsInVolume(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) (bool, string, error) + PodExistsInVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (bool, string, error) // VolumeExists returns true if the given volume exists in the list of // attached volumes in the cache, indicating the volume is attached to this // node. - VolumeExists(volumeName api.UniqueVolumeName) bool + VolumeExists(volumeName v1.UniqueVolumeName) bool // GetMountedVolumes generates and returns a list of volumes and the pods // they are successfully attached and mounted for based on the current @@ -164,7 +164,7 @@ func NewActualStateOfWorld( volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld { return &actualStateOfWorld{ nodeName: nodeName, - attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume), + attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume), volumePluginMgr: volumePluginMgr, } } @@ -193,7 +193,7 @@ type actualStateOfWorld struct { // state by default. // The key in this map is the name of the volume and the value is an object // containing more information about the attached volume. - attachedVolumes map[api.UniqueVolumeName]attachedVolume + attachedVolumes map[v1.UniqueVolumeName]attachedVolume // volumePluginMgr is the volume plugin manager used to create volume // plugin objects. @@ -206,7 +206,7 @@ type actualStateOfWorld struct { // implement an attacher are assumed to be in this state. type attachedVolume struct { // volumeName contains the unique identifier for this volume. - volumeName api.UniqueVolumeName + volumeName v1.UniqueVolumeName // mountedPods is a map containing the set of pods that this volume has been // successfully mounted to. 
The key in this map is the name of the pod and @@ -273,19 +273,19 @@ type mountedPod struct { } func (asw *actualStateOfWorld) MarkVolumeAsAttached( - volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error { + volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error { return asw.addVolume(volumeName, volumeSpec, devicePath) } func (asw *actualStateOfWorld) MarkVolumeAsDetached( - volumeName api.UniqueVolumeName, nodeName types.NodeName) { + volumeName v1.UniqueVolumeName, nodeName types.NodeName) { asw.DeleteVolume(volumeName) } func (asw *actualStateOfWorld) MarkVolumeAsMounted( podName volumetypes.UniquePodName, podUID types.UID, - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error { @@ -298,27 +298,27 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted( volumeGidValue) } -func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) { +func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) { // no operation for kubelet side } -func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error { +func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error { // no operation for kubelet side return nil } func (asw *actualStateOfWorld) MarkVolumeAsUnmounted( - podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error { + podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error { return asw.DeletePodFromVolume(podName, volumeName) } func (asw *actualStateOfWorld) MarkDeviceAsMounted( - volumeName api.UniqueVolumeName) error { + volumeName v1.UniqueVolumeName) error { return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */) } func (asw *actualStateOfWorld) MarkDeviceAsUnmounted( - volumeName api.UniqueVolumeName) error { + volumeName v1.UniqueVolumeName) error { return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */) } @@ -329,7 +329,7 @@ func (asw *actualStateOfWorld) MarkDeviceAsUnmounted( // volume plugin can support the given volumeSpec or more than one plugin can // support it, an error is returned. 
func (asw *actualStateOfWorld) addVolume( - volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string) error { + volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string) error { asw.Lock() defer asw.Unlock() @@ -383,7 +383,7 @@ func (asw *actualStateOfWorld) addVolume( func (asw *actualStateOfWorld) AddPodToVolume( podName volumetypes.UniquePodName, podUID types.UID, - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error { @@ -447,7 +447,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( } func (asw *actualStateOfWorld) SetVolumeGloballyMounted( - volumeName api.UniqueVolumeName, globallyMounted bool) error { + volumeName v1.UniqueVolumeName, globallyMounted bool) error { asw.Lock() defer asw.Unlock() @@ -464,7 +464,7 @@ func (asw *actualStateOfWorld) SetVolumeGloballyMounted( } func (asw *actualStateOfWorld) DeletePodFromVolume( - podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error { + podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error { asw.Lock() defer asw.Unlock() @@ -483,7 +483,7 @@ func (asw *actualStateOfWorld) DeletePodFromVolume( return nil } -func (asw *actualStateOfWorld) DeleteVolume(volumeName api.UniqueVolumeName) error { +func (asw *actualStateOfWorld) DeleteVolume(volumeName v1.UniqueVolumeName) error { asw.Lock() defer asw.Unlock() @@ -505,7 +505,7 @@ func (asw *actualStateOfWorld) DeleteVolume(volumeName api.UniqueVolumeName) err func (asw *actualStateOfWorld) PodExistsInVolume( podName volumetypes.UniquePodName, - volumeName api.UniqueVolumeName) (bool, string, error) { + volumeName v1.UniqueVolumeName) (bool, string, error) { asw.RLock() defer asw.RUnlock() @@ -523,7 +523,7 @@ func (asw *actualStateOfWorld) PodExistsInVolume( } func (asw *actualStateOfWorld) VolumeExists( - volumeName api.UniqueVolumeName) bool { + volumeName v1.UniqueVolumeName) bool { asw.RLock() defer asw.RUnlock() @@ -628,7 +628,7 @@ var _ error = volumeNotAttachedError{} // volumeNotAttachedError is an error returned when PodExistsInVolume() fails to // find specified volume in the list of attached volumes. type volumeNotAttachedError struct { - volumeName api.UniqueVolumeName + volumeName v1.UniqueVolumeName } func (err volumeNotAttachedError) Error() string { @@ -637,7 +637,7 @@ func (err volumeNotAttachedError) Error() string { err.volumeName) } -func newVolumeNotAttachedError(volumeName api.UniqueVolumeName) error { +func newVolumeNotAttachedError(volumeName v1.UniqueVolumeName) error { return volumeNotAttachedError{ volumeName: volumeName, } @@ -651,7 +651,7 @@ var _ error = remountRequiredError{} // given volume should be remounted to the pod to reflect changes in the // referencing pod. 
type remountRequiredError struct { - volumeName api.UniqueVolumeName + volumeName v1.UniqueVolumeName podName volumetypes.UniquePodName } @@ -662,7 +662,7 @@ func (err remountRequiredError) Error() string { } func newRemountRequiredError( - volumeName api.UniqueVolumeName, podName volumetypes.UniquePodName) error { + volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error { return remountRequiredError{ volumeName: volumeName, podName: podName, diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index d38fbe3e7fd..bec23f6cef8 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -19,14 +19,14 @@ package cache import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) -var emptyVolumeName = api.UniqueVolumeName("") +var emptyVolumeName = v1.UniqueVolumeName("") // Calls MarkVolumeAsAttached() once to add volume // Verifies newly added volume exists in GetUnmountedVolumes() @@ -35,17 +35,17 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) { // Arrange volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -79,17 +79,17 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing. // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -99,7 +99,7 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing. 
} volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} devicePath := "fake/device/path" - volumeName := api.UniqueVolumeName("this-would-never-be-a-volume-name") + volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name") // Act err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, devicePath) @@ -123,17 +123,17 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) { volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t) devicePath := "fake/device/path" asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -171,17 +171,17 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) devicePath := "fake/device/path" - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -228,17 +228,17 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) devicePath := "fake/device/path" - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -290,17 +290,17 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -355,17 +355,17 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { // Arrange volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - 
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -396,7 +396,7 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { } func verifyVolumeExistsInGloballyMountedVolumes( - t *testing.T, expectedVolumeName api.UniqueVolumeName, asw ActualStateOfWorld) { + t *testing.T, expectedVolumeName v1.UniqueVolumeName, asw ActualStateOfWorld) { globallyMountedVolumes := asw.GetGloballyMountedVolumes() for _, volume := range globallyMountedVolumes { if volume.VolumeName == expectedVolumeName { @@ -411,7 +411,7 @@ func verifyVolumeExistsInGloballyMountedVolumes( } func verifyVolumeDoesntExistInGloballyMountedVolumes( - t *testing.T, volumeToCheck api.UniqueVolumeName, asw ActualStateOfWorld) { + t *testing.T, volumeToCheck v1.UniqueVolumeName, asw ActualStateOfWorld) { globallyMountedVolumes := asw.GetGloballyMountedVolumes() for _, volume := range globallyMountedVolumes { if volume.VolumeName == volumeToCheck { @@ -424,7 +424,7 @@ func verifyVolumeDoesntExistInGloballyMountedVolumes( func verifyVolumeExistsAsw( t *testing.T, - expectedVolumeName api.UniqueVolumeName, + expectedVolumeName v1.UniqueVolumeName, shouldExist bool, asw ActualStateOfWorld) { volumeExists := asw.VolumeExists(expectedVolumeName) @@ -438,7 +438,7 @@ func verifyVolumeExistsAsw( } func verifyVolumeExistsInUnmountedVolumes( - t *testing.T, expectedVolumeName api.UniqueVolumeName, asw ActualStateOfWorld) { + t *testing.T, expectedVolumeName v1.UniqueVolumeName, asw ActualStateOfWorld) { unmountedVolumes := asw.GetUnmountedVolumes() for _, volume := range unmountedVolumes { if volume.VolumeName == expectedVolumeName { @@ -453,7 +453,7 @@ func verifyVolumeExistsInUnmountedVolumes( } func verifyVolumeDoesntExistInUnmountedVolumes( - t *testing.T, volumeToCheck api.UniqueVolumeName, asw ActualStateOfWorld) { + t *testing.T, volumeToCheck v1.UniqueVolumeName, asw ActualStateOfWorld) { unmountedVolumes := asw.GetUnmountedVolumes() for _, volume := range unmountedVolumes { if volume.VolumeName == volumeToCheck { @@ -467,7 +467,7 @@ func verifyVolumeDoesntExistInUnmountedVolumes( func verifyPodExistsInVolumeAsw( t *testing.T, expectedPodName volumetypes.UniquePodName, - expectedVolumeName api.UniqueVolumeName, + expectedVolumeName v1.UniqueVolumeName, expectedDevicePath string, asw ActualStateOfWorld) { podExistsInVolume, devicePath, err := @@ -494,7 +494,7 @@ func verifyPodExistsInVolumeAsw( func verifyPodDoesntExistInVolumeAsw( t *testing.T, podToCheck volumetypes.UniquePodName, - volumeToCheck api.UniqueVolumeName, + volumeToCheck v1.UniqueVolumeName, expectVolumeToExist bool, asw ActualStateOfWorld) { podExistsInVolume, devicePath, err := diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 65b4765f9eb..e06aa925301 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -24,7 +24,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/types" @@ -51,7 +51,7 @@ type DesiredStateOfWorld interface { // added. // If a pod with the same unique name already exists under the specified // volume, this is a no-op. 
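// Illustrative sketch only, not part of this diff: a hypothetical helper,
// registerPodVolume, showing the desired-state API after the migration. AddPodToVolume
// (declared just below) now accepts a *v1.Pod and returns a v1.UniqueVolumeName, and
// MarkVolumesReportedInUse takes the v1-typed names.
package example // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

func registerPodVolume(
	dsw cache.DesiredStateOfWorld,
	pod *v1.Pod,
	spec *volume.Spec) (v1.UniqueVolumeName, error) {
	podName := volumehelper.GetUniquePodName(pod)
	// A no-op if the pod is already registered under this volume.
	volName, err := dsw.AddPodToVolume(podName, pod, spec, spec.Name(), "" /* volumeGidValue */)
	if err != nil {
		return "", err
	}
	// Mirror the node-status feedback loop: mark the new volume as reported in use.
	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volName})
	return volName, nil
}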
- AddPodToVolume(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGidValue string) (api.UniqueVolumeName, error) + AddPodToVolume(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGidValue string) (v1.UniqueVolumeName, error) // MarkVolumesReportedInUse sets the ReportedInUse value to true for the // reportedVolumes. For volumes not in the reportedVolumes list, the @@ -62,7 +62,7 @@ type DesiredStateOfWorld interface { // to check this value before issuing the operation. // If a volume in the reportedVolumes list does not exist in the list of // volumes that should be attached to this node, it is skipped without error. - MarkVolumesReportedInUse(reportedVolumes []api.UniqueVolumeName) + MarkVolumesReportedInUse(reportedVolumes []v1.UniqueVolumeName) // DeletePodFromVolume removes the given pod from the given volume in the // cache indicating the specified pod no longer requires the specified @@ -73,13 +73,13 @@ type DesiredStateOfWorld interface { // attached volumes, this is a no-op. // If after deleting the pod, the specified volume contains no other child // pods, the volume is also deleted. - DeletePodFromVolume(podName types.UniquePodName, volumeName api.UniqueVolumeName) + DeletePodFromVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName) // VolumeExists returns true if the given volume exists in the list of // volumes that should be attached to this node. // If a pod with the same unique name does not exist under the specified // volume, false is returned. - VolumeExists(volumeName api.UniqueVolumeName) bool + VolumeExists(volumeName v1.UniqueVolumeName) bool // PodExistsInVolume returns true if the given pod exists in the list of // podsToMount for the given volume in the cache. @@ -87,7 +87,7 @@ type DesiredStateOfWorld interface { // volume, false is returned. // If a volume with the name volumeName does not exist in the list of // attached volumes, false is returned. - PodExistsInVolume(podName types.UniquePodName, volumeName api.UniqueVolumeName) bool + PodExistsInVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool // GetVolumesToMount generates and returns a list of volumes that should be // attached to this node and the pods they should be mounted to based on the @@ -109,7 +109,7 @@ type VolumeToMount struct { // NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld. func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld { return &desiredStateOfWorld{ - volumesToMount: make(map[api.UniqueVolumeName]volumeToMount), + volumesToMount: make(map[v1.UniqueVolumeName]volumeToMount), volumePluginMgr: volumePluginMgr, } } @@ -119,7 +119,7 @@ type desiredStateOfWorld struct { // attached to this node and mounted to the pods referencing it. The key in // the map is the name of the volume and the value is a volume object // containing more information about the volume. - volumesToMount map[api.UniqueVolumeName]volumeToMount + volumesToMount map[v1.UniqueVolumeName]volumeToMount // volumePluginMgr is the volume plugin manager used to create volume // plugin objects. volumePluginMgr *volume.VolumePluginMgr @@ -131,7 +131,7 @@ type desiredStateOfWorld struct { // and mounted to podsToMount. type volumeToMount struct { // volumeName contains the unique identifier for this volume. 
- volumeName api.UniqueVolumeName + volumeName v1.UniqueVolumeName // podsToMount is a map containing the set of pods that reference this // volume and should mount it once it is attached. The key in the map is @@ -158,7 +158,7 @@ type podToMount struct { podName types.UniquePodName // Pod to mount the volume to. Used to create NewMounter. - pod *api.Pod + pod *v1.Pod // volume spec containing the specification for this volume. Used to // generate the volume plugin object, and passed to plugin methods. @@ -175,10 +175,10 @@ type podToMount struct { func (dsw *desiredStateOfWorld) AddPodToVolume( podName types.UniquePodName, - pod *api.Pod, + pod *v1.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, - volumeGidValue string) (api.UniqueVolumeName, error) { + volumeGidValue string) (v1.UniqueVolumeName, error) { dsw.Lock() defer dsw.Unlock() @@ -190,7 +190,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( err) } - var volumeName api.UniqueVolumeName + var volumeName v1.UniqueVolumeName // The unique volume name used depends on whether the volume is attachable // or not. @@ -239,12 +239,12 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( } func (dsw *desiredStateOfWorld) MarkVolumesReportedInUse( - reportedVolumes []api.UniqueVolumeName) { + reportedVolumes []v1.UniqueVolumeName) { dsw.Lock() defer dsw.Unlock() reportedVolumesMap := make( - map[api.UniqueVolumeName]bool, len(reportedVolumes) /* capacity */) + map[v1.UniqueVolumeName]bool, len(reportedVolumes) /* capacity */) for _, reportedVolume := range reportedVolumes { reportedVolumesMap[reportedVolume] = true @@ -258,7 +258,7 @@ func (dsw *desiredStateOfWorld) MarkVolumesReportedInUse( } func (dsw *desiredStateOfWorld) DeletePodFromVolume( - podName types.UniquePodName, volumeName api.UniqueVolumeName) { + podName types.UniquePodName, volumeName v1.UniqueVolumeName) { dsw.Lock() defer dsw.Unlock() @@ -281,7 +281,7 @@ func (dsw *desiredStateOfWorld) DeletePodFromVolume( } func (dsw *desiredStateOfWorld) VolumeExists( - volumeName api.UniqueVolumeName) bool { + volumeName v1.UniqueVolumeName) bool { dsw.RLock() defer dsw.RUnlock() @@ -290,7 +290,7 @@ func (dsw *desiredStateOfWorld) VolumeExists( } func (dsw *desiredStateOfWorld) PodExistsInVolume( - podName types.UniquePodName, volumeName api.UniqueVolumeName) bool { + podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool { dsw.RLock() defer dsw.RUnlock() diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go index 9d07a7850b0..3bb7467c5eb 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go @@ -19,7 +19,7 @@ package cache import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ -33,17 +33,17 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) { // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) dsw := NewDesiredStateOfWorld(volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3", UID: "pod3uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: 
v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -77,17 +77,17 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) { // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) dsw := NewDesiredStateOfWorld(volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3", UID: "pod3uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -121,17 +121,17 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) { // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) dsw := NewDesiredStateOfWorld(volumePluginMgr) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3", UID: "pod3uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -173,17 +173,17 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) dsw := NewDesiredStateOfWorld(volumePluginMgr) - pod1 := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod1 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume1-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -195,17 +195,17 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]} pod1Name := volumehelper.GetUniquePodName(pod1) - pod2 := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod2 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", UID: "pod2uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume2-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device2", }, }, @@ -217,17 +217,17 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]} pod2Name := volumehelper.GetUniquePodName(pod2) - pod3 := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod3 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3", UID: "pod3uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume3-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device3", }, }, @@ -258,7 +258,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { } // Act - volumesReportedInUse := 
[]api.UniqueVolumeName{generatedVolume2Name} + volumesReportedInUse := []v1.UniqueVolumeName{generatedVolume2Name} dsw.MarkVolumesReportedInUse(volumesReportedInUse) // Assert @@ -276,7 +276,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, dsw) // Act - volumesReportedInUse = []api.UniqueVolumeName{generatedVolume3Name} + volumesReportedInUse = []v1.UniqueVolumeName{generatedVolume3Name} dsw.MarkVolumesReportedInUse(volumesReportedInUse) // Assert @@ -295,7 +295,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { } func verifyVolumeExistsDsw( - t *testing.T, expectedVolumeName api.UniqueVolumeName, dsw DesiredStateOfWorld) { + t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) { volumeExists := dsw.VolumeExists(expectedVolumeName) if !volumeExists { t.Fatalf( @@ -306,7 +306,7 @@ func verifyVolumeExistsDsw( } func verifyVolumeDoesntExist( - t *testing.T, expectedVolumeName api.UniqueVolumeName, dsw DesiredStateOfWorld) { + t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) { volumeExists := dsw.VolumeExists(expectedVolumeName) if volumeExists { t.Fatalf( @@ -318,7 +318,7 @@ func verifyVolumeDoesntExist( func verifyVolumeExistsInVolumesToMount( t *testing.T, - expectedVolumeName api.UniqueVolumeName, + expectedVolumeName v1.UniqueVolumeName, expectReportedInUse bool, dsw DesiredStateOfWorld) { volumesToMount := dsw.GetVolumesToMount() @@ -343,7 +343,7 @@ func verifyVolumeExistsInVolumesToMount( } func verifyVolumeDoesntExistInVolumesToMount( - t *testing.T, volumeToCheck api.UniqueVolumeName, dsw DesiredStateOfWorld) { + t *testing.T, volumeToCheck v1.UniqueVolumeName, dsw DesiredStateOfWorld) { volumesToMount := dsw.GetVolumesToMount() for _, volume := range volumesToMount { if volume.VolumeName == volumeToCheck { @@ -357,7 +357,7 @@ func verifyVolumeDoesntExistInVolumesToMount( func verifyPodExistsInVolumeDsw( t *testing.T, expectedPodName volumetypes.UniquePodName, - expectedVolumeName api.UniqueVolumeName, + expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) { if podExistsInVolume := dsw.PodExistsInVolume( expectedPodName, expectedVolumeName); !podExistsInVolume { @@ -370,7 +370,7 @@ func verifyPodExistsInVolumeDsw( func verifyPodDoesntExistInVolumeDsw( t *testing.T, expectedPodName volumetypes.UniquePodName, - expectedVolumeName api.UniqueVolumeName, + expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) { if podExistsInVolume := dsw.PodExistsInVolume( expectedPodName, expectedVolumeName); podExistsInVolume { diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 888cf9c3439..9f3c14b6dae 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -28,7 +28,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -64,7 +65,7 @@ type DesiredStateOfWorldPopulator interface { // that exist on this host // desiredStateOfWorld - the cache to populate func 
NewDesiredStateOfWorldPopulator( - kubeClient internalclientset.Interface, + kubeClient clientset.Interface, loopSleepDuration time.Duration, getPodStatusRetryDuration time.Duration, podManager pod.Manager, @@ -83,7 +84,7 @@ func NewDesiredStateOfWorldPopulator( } type desiredStateOfWorldPopulator struct { - kubeClient internalclientset.Interface + kubeClient clientset.Interface loopSleepDuration time.Duration getPodStatusRetryDuration time.Duration podManager pod.Manager @@ -129,8 +130,8 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() { } } -func isPodTerminated(pod *api.Pod) bool { - return pod.Status.Phase == api.PodFailed || pod.Status.Phase == api.PodSucceeded +func isPodTerminated(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded } // Iterate through all pods and add to desired state of world if they don't @@ -160,7 +161,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { } // Skip non-memory backed volumes belonging to terminated pods volume := volumeToMount.VolumeSpec.Volume - if (volume.EmptyDir == nil || volume.EmptyDir.Medium != api.StorageMediumMemory) && + if (volume.EmptyDir == nil || volume.EmptyDir.Medium != v1.StorageMediumMemory) && volume.ConfigMap == nil && volume.Secret == nil { continue } @@ -216,7 +217,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { // processPodVolumes processes the volumes in the given pod and adds them to the // desired state of the world. -func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *api.Pod) { +func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { if pod == nil { return } @@ -294,7 +295,7 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod( // createVolumeSpec creates and returns a mutatable volume.Spec object for the // specified volume. It dereference any PVC to get PV objects, if needed. func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( - podVolume api.Volume, podNamespace string) (*volume.Spec, string, error) { + podVolume v1.Volume, podNamespace string) (*volume.Spec, string, error) { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { glog.V(10).Infof( @@ -349,10 +350,10 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( "failed to deep copy %q volume object. 
err=%v", podVolume.Name, err) } - clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume) + clonedPodVolume, ok := clonedPodVolumeObj.(v1.Volume) if !ok { return nil, "", fmt.Errorf( - "failed to cast clonedPodVolume %#v to api.Volume", + "failed to cast clonedPodVolume %#v to v1.Volume", clonedPodVolumeObj) } @@ -374,7 +375,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( err) } - if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" { + if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" { return "", "", fmt.Errorf( "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", namespace, @@ -417,7 +418,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil } -func getPVVolumeGidAnnotationValue(pv *api.PersistentVolume) string { +func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string { if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok { return volumeGid } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 3db72963756..c2da681dfdf 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -27,8 +27,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/cmd/kubelet/app/options" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/types" @@ -85,7 +85,7 @@ type Reconciler interface { // mounter - mounter passed in from kubelet, passed down unmount path // volumePluginMrg - volume plugin manager passed from kubelet func NewReconciler( - kubeClient internalclientset.Interface, + kubeClient clientset.Interface, controllerAttachDetachEnabled bool, loopSleepDuration time.Duration, syncDuration time.Duration, @@ -115,7 +115,7 @@ func NewReconciler( } type reconciler struct { - kubeClient internalclientset.Interface + kubeClient clientset.Interface controllerAttachDetachEnabled bool loopSleepDuration time.Duration syncDuration time.Duration @@ -401,11 +401,11 @@ type podVolume struct { } type reconstructedVolume struct { - volumeName api.UniqueVolumeName + volumeName v1.UniqueVolumeName podName volumetypes.UniquePodName volumeSpec *volumepkg.Spec outerVolumeSpecName string - pod *api.Pod + pod *v1.Pod pluginIsAttachable bool volumeGidValue string devicePath string @@ -426,7 +426,7 @@ func (rc *reconciler) syncStates(podsDir string) { return } - volumesNeedUpdate := make(map[api.UniqueVolumeName]*reconstructedVolume) + volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume) for _, volume := range podVolumes { reconstructedVolume, err := rc.reconstructVolume(volume) if err != nil { @@ -493,8 +493,8 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, if err != nil { return nil, err } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ UID: types.UID(volume.podName), }, } @@ -507,7 +507,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, if err != nil { return nil, err } - var uniqueVolumeName api.UniqueVolumeName + var uniqueVolumeName v1.UniqueVolumeName if attachablePlugin != nil { uniqueVolumeName = 
volumehelper.GetUniqueVolumeName(volume.pluginName, volumeName) } else { @@ -546,7 +546,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, return reconstructedVolume, nil } -func (rc *reconciler) updateStates(volumesNeedUpdate map[api.UniqueVolumeName]*reconstructedVolume) error { +func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { // Get the node status to retrieve volume device path information. node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName)) if fetchErr != nil { diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index 89fedb823da..3ddd5cd94f3 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -22,8 +22,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/kubelet/config" @@ -111,17 +111,17 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) { &mount.FakeMounter{}, volumePluginMgr, kubeletPodsDir) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -182,17 +182,17 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) { &mount.FakeMounter{}, volumePluginMgr, kubeletPodsDir) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -205,7 +205,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) { podName := volumehelper.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) - dsw.MarkVolumesReportedInUse([]api.UniqueVolumeName{generatedVolumeName}) + dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName}) // Assert if err != nil { @@ -254,17 +254,17 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) { &mount.FakeMounter{}, volumePluginMgr, kubeletPodsDir) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -337,17 +337,17 @@ func 
Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) { &mount.FakeMounter{}, volumePluginMgr, kubeletPodsDir) - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", UID: "pod1uid", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "volume-name", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device1", }, }, @@ -369,7 +369,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) { // Act runReconciler(reconciler) - dsw.MarkVolumesReportedInUse([]api.UniqueVolumeName{generatedVolumeName}) + dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName}) waitForMount(t, fakePlugin, generatedVolumeName, asw) // Assert @@ -396,7 +396,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) { func waitForMount( t *testing.T, fakePlugin *volumetesting.FakeVolumePlugin, - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) { err := retryWithExponentialBackOff( time.Duration(5*time.Millisecond), @@ -420,7 +420,7 @@ func waitForMount( func waitForDetach( t *testing.T, fakePlugin *volumetesting.FakeVolumePlugin, - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) { err := retryWithExponentialBackOff( time.Duration(5*time.Millisecond), @@ -452,16 +452,16 @@ func createTestClient() *fake.Clientset { fakeClient := &fake.Clientset{} fakeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: string(nodeName)}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ + return true, &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: string(nodeName)}, + Status: v1.NodeStatus{ + VolumesAttached: []v1.AttachedVolume{ { Name: "fake-plugin/volume-name", DevicePath: "fake/path", }, }}, - Spec: api.NodeSpec{ExternalID: string(nodeName)}, + Spec: v1.NodeSpec{ExternalID: string(nodeName)}, }, nil }) fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index f70590e134f..42929ffae6c 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -22,8 +22,8 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/container" @@ -101,7 +101,7 @@ type VolumeManager interface { // actual state of the world). // An error is returned if all volumes are not attached and mounted within // the duration defined in podAttachAndMountTimeout. - WaitForAttachAndMount(pod *api.Pod) error + WaitForAttachAndMount(pod *v1.Pod) error // GetMountedVolumesForPod returns a VolumeMap containing the volumes // referenced by the specified pod that are successfully attached and @@ -113,7 +113,7 @@ type VolumeManager interface { // GetExtraSupplementalGroupsForPod returns a list of the extra // supplemental groups for the Pod. 
These extra supplemental groups come // from annotations on persistent volumes that the pod depends on. - GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 + GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 // GetVolumesInUse returns a list of all volumes that implement the volume.Attacher // interface and are currently in use according to the actual and desired @@ -124,7 +124,7 @@ type VolumeManager interface { // has been unmounted (as indicated in actual state of world). // TODO(#27653): VolumesInUse should be handled gracefully on kubelet' // restarts. - GetVolumesInUse() []api.UniqueVolumeName + GetVolumesInUse() []v1.UniqueVolumeName // ReconcilerStatesHasBeenSynced returns true only after the actual states in reconciler // has been synced at least once after kubelet starts so that it is safe to update mounted @@ -133,11 +133,11 @@ type VolumeManager interface { // VolumeIsAttached returns true if the given volume is attached to this // node. - VolumeIsAttached(volumeName api.UniqueVolumeName) bool + VolumeIsAttached(volumeName v1.UniqueVolumeName) bool // Marks the specified volume as having successfully been reported as "in // use" in the nodes's volume status. - MarkVolumesAsReportedInUse(volumesReportedAsInUse []api.UniqueVolumeName) + MarkVolumesAsReportedInUse(volumesReportedAsInUse []v1.UniqueVolumeName) } // NewVolumeManager returns a new concrete instance implementing the @@ -151,7 +151,7 @@ func NewVolumeManager( controllerAttachDetachEnabled bool, nodeName k8stypes.NodeName, podManager pod.Manager, - kubeClient internalclientset.Interface, + kubeClient clientset.Interface, volumePluginMgr *volume.VolumePluginMgr, kubeContainerRuntime kubecontainer.Runtime, mounter mount.Interface, @@ -199,7 +199,7 @@ func NewVolumeManager( type volumeManager struct { // kubeClient is the kube API client used by DesiredStateOfWorldPopulator to // communicate with the API server to fetch PV and PVC objects - kubeClient internalclientset.Interface + kubeClient clientset.Interface // volumePluginMgr is the volume plugin manager used to access volume // plugins. It must be pre-initialized. @@ -255,7 +255,7 @@ func (vm *volumeManager) GetMountedVolumesForPod( return podVolumes } -func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 { +func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { podName := volumehelper.GetUniquePodName(pod) supplementalGroups := sets.NewString() @@ -278,7 +278,7 @@ func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 return result } -func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName { +func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName { // Report volumes in desired state of world and actual state of world so // that volumes are marked in use as soon as the decision is made that the // volume *should* be attached to this node until it is safely unmounted. 
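// Illustrative sketch only, not part of this diff: a hypothetical helper,
// publishVolumesInUse, assuming the usual kubelet node-status flow. GetVolumesInUse now
// returns []v1.UniqueVolumeName, which can be assigned to v1.NodeStatus.VolumesInUse
// without conversion, and the manager is told which names were actually reported via
// MarkVolumesAsReportedInUse.
package example // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
)

func publishVolumesInUse(node *v1.Node, vm volumemanager.VolumeManager) {
	inUse := vm.GetVolumesInUse()
	// Both sides of this assignment are []v1.UniqueVolumeName after this change.
	node.Status.VolumesInUse = inUse
	// Feed back what was reported so the desired state can set ReportedInUse.
	vm.MarkVolumesAsReportedInUse(inUse)
}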
@@ -286,12 +286,12 @@ func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName { mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes() volumesToReportInUse := make( - []api.UniqueVolumeName, + []v1.UniqueVolumeName, 0, /* len */ len(desiredVolumes)+len(mountedVolumes) /* cap */) desiredVolumesMap := make( - map[api.UniqueVolumeName]bool, + map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(mountedVolumes) /* cap */) for _, volume := range desiredVolumes { @@ -317,16 +317,16 @@ func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool { } func (vm *volumeManager) VolumeIsAttached( - volumeName api.UniqueVolumeName) bool { + volumeName v1.UniqueVolumeName) bool { return vm.actualStateOfWorld.VolumeExists(volumeName) } func (vm *volumeManager) MarkVolumesAsReportedInUse( - volumesReportedAsInUse []api.UniqueVolumeName) { + volumesReportedAsInUse []v1.UniqueVolumeName) { vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse) } -func (vm *volumeManager) WaitForAttachAndMount(pod *api.Pod) error { +func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { expectedVolumes := getExpectedVolumes(pod) if len(expectedVolumes) == 0 { // No volumes to verify @@ -402,7 +402,7 @@ func filterUnmountedVolumes( // getExpectedVolumes returns a list of volumes that must be mounted in order to // consider the volume setup step for this pod satisfied. -func getExpectedVolumes(pod *api.Pod) []string { +func getExpectedVolumes(pod *v1.Pod) []string { expectedVolumes := []string{} if pod == nil { return expectedVolumes @@ -418,7 +418,7 @@ func getExpectedVolumes(pod *api.Pod) []string { // getExtraSupplementalGid returns the value of an extra supplemental GID as // defined by an annotation on a volume and a boolean indicating whether the // volume defined a GID that the pod doesn't already request. 
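// Illustrative sketch only, not part of this diff: a hypothetical pod-level caller,
// ensurePodVolumes, demonstrating the two VolumeManager entry points that switch from
// *api.Pod to *v1.Pod in this change.
package example // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
)

func ensurePodVolumes(vm volumemanager.VolumeManager, pod *v1.Pod) ([]int64, error) {
	// Blocks until all of the pod's volumes are attached and mounted, or times out.
	if err := vm.WaitForAttachAndMount(pod); err != nil {
		return nil, err
	}
	// Extra supplemental groups are derived from GID annotations on the pod's persistent volumes.
	return vm.GetExtraSupplementalGroupsForPod(pod), nil
}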
-func getExtraSupplementalGid(volumeGidValue string, pod *api.Pod) (int64, bool) { +func getExtraSupplementalGid(volumeGidValue string, pod *v1.Pod) (int64, bool) { if volumeGidValue == "" { return 0, false } diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index a000949b92a..6ffbc2d2fa9 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -23,9 +23,9 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/config" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -64,11 +64,11 @@ func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) { stopCh := runVolumeManager(manager) defer close(stopCh) - podManager.SetPods([]*api.Pod{pod}) + podManager.SetPods([]*v1.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( - api.UniqueVolumeName(node.Status.VolumesAttached[0].Name), + v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name), stopCh, manager) @@ -83,7 +83,7 @@ func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) { t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted) } - expectedInUse := []api.UniqueVolumeName{api.UniqueVolumeName(node.Status.VolumesAttached[0].Name)} + expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)} actualInUse := manager.GetVolumesInUse() if !reflect.DeepEqual(expectedInUse, actualInUse) { t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse) @@ -125,20 +125,20 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) { } for _, tc := range cases { - pv := &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ + pv := &v1.PersistentVolume{ + ObjectMeta: v1.ObjectMeta{ Name: "pvA", Annotations: map[string]string{ volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation, }, }, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeSource: api.PersistentVolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, - ClaimRef: &api.ObjectReference{ + ClaimRef: &v1.ObjectReference{ Name: claim.ObjectMeta.Name, }, }, @@ -156,11 +156,11 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) { close(stopCh) }() - podManager.SetPods([]*api.Pod{pod}) + podManager.SetPods([]*v1.Pod{pod}) // Fake node status update go simulateVolumeInUseUpdate( - api.UniqueVolumeName(node.Status.VolumesAttached[0].Name), + v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name), stopCh, manager) @@ -180,7 +180,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) { func newTestVolumeManager( tmpDir string, podManager pod.Manager, - kubeClient internalclientset.Interface) (VolumeManager, error) { + kubeClient clientset.Interface) (VolumeManager, error) { plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} fakeRecorder := &record.FakeRecorder{} plugMgr := &volume.VolumePluginMgr{} @@ -203,72 +203,72 @@ func newTestVolumeManager( // 
createObjects returns objects for making a fake clientset. The pv is // already attached to the node and bound to the claim used by the pod. -func createObjects() (*api.Node, *api.Pod, *api.PersistentVolume, *api.PersistentVolumeClaim) { - node := &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testHostname}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ +func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { + node := &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: testHostname}, + Status: v1.NodeStatus{ + VolumesAttached: []v1.AttachedVolume{ { Name: "fake/pvA", DevicePath: "fake/path", }, }}, - Spec: api.NodeSpec{ExternalID: testHostname}, + Spec: v1.NodeSpec{ExternalID: testHostname}, } - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "abc", Namespace: "nsA", UID: "1234", }, - Spec: api.PodSpec{ - Volumes: []api.Volume{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { Name: "vol1", - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "claimA", }, }, }, }, - SecurityContext: &api.PodSecurityContext{ + SecurityContext: &v1.PodSecurityContext{ SupplementalGroups: []int64{555}, }, }, } - pv := &api.PersistentVolume{ - ObjectMeta: api.ObjectMeta{ + pv := &v1.PersistentVolume{ + ObjectMeta: v1.ObjectMeta{ Name: "pvA", }, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeSource: api.PersistentVolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "fake-device", }, }, - ClaimRef: &api.ObjectReference{ + ClaimRef: &v1.ObjectReference{ Name: "claimA", }, }, } - claim := &api.PersistentVolumeClaim{ - ObjectMeta: api.ObjectMeta{ + claim := &v1.PersistentVolumeClaim{ + ObjectMeta: v1.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, - Spec: api.PersistentVolumeClaimSpec{ + Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, - Status: api.PersistentVolumeClaimStatus{ - Phase: api.ClaimBound, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, }, } return node, pod, pv, claim } func simulateVolumeInUseUpdate( - volumeName api.UniqueVolumeName, + volumeName v1.UniqueVolumeName, stopCh <-chan struct{}, volumeManager VolumeManager) { ticker := time.NewTicker(100 * time.Millisecond) @@ -277,7 +277,7 @@ func simulateVolumeInUseUpdate( select { case <-ticker.C: volumeManager.MarkVolumesAsReportedInUse( - []api.UniqueVolumeName{volumeName}) + []v1.UniqueVolumeName{volumeName}) case <-stopCh: return }
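// Illustrative sketch only, not part of this diff: a minimal test fixture using a
// hypothetical helper name newFakeNodeClient. It mirrors createTestClient above: the
// fake clientset now comes from release_1_5/fake and its reactors return v1 objects,
// which is what lets the clientset.Interface parameters of NewVolumeManager,
// NewReconciler and NewDesiredStateOfWorldPopulator work against the typed v1 client.
package example // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

func newFakeNodeClient(nodeName string) *fake.Clientset {
	fakeClient := &fake.Clientset{}
	fakeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return a *v1.Node so the typed v1 core client can decode the response.
		return true, &v1.Node{
			ObjectMeta: v1.ObjectMeta{Name: nodeName},
			Spec:       v1.NodeSpec{ExternalID: nodeName},
		}, nil
	})
	return fakeClient
}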