diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index cddb7cd2643..d2627dc634e 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -329,7 +329,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { var heartbeatClient v1core.CoreV1Interface var externalKubeClient clientset.Interface - clientConfig, err := CreateAPIServerClientConfig(s) + clientConfig, err := createAPIServerClientConfig(s) var clientCertificateManager certificate.Manager if err == nil { @@ -613,10 +613,9 @@ func createClientConfig(s *options.KubeletServer) (*restclient.Config, error) { } } -// CreateAPIServerClientConfig generates a client.Config from command line flags +// createAPIServerClientConfig generates a client.Config from command line flags // via createClientConfig and then injects chaos into the configuration via addChaosToClientConfig. -// This func is exported to support integration with third party kubelet extensions (e.g. kubernetes-mesos). -func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) { +func createAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) { clientConfig, err := createClientConfig(s) if err != nil { return nil, err @@ -688,15 +687,11 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. 
credentialprovider.SetPreferredDockercfgPath(kubeFlags.RootDirectory) glog.V(2).Infof("Using root directory: %v", kubeFlags.RootDirectory) - builder := kubeDeps.Builder - if builder == nil { - builder = CreateAndInitKubelet - } if kubeDeps.OSInterface == nil { kubeDeps.OSInterface = kubecontainer.RealOS{} } - k, err := builder(kubeCfg, + k, err := CreateAndInitKubelet(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, kubeFlags.ContainerRuntime, diff --git a/pkg/api/endpoints/util.go b/pkg/api/endpoints/util.go index 49bdbc47a19..3d7b6e514f6 100644 --- a/pkg/api/endpoints/util.go +++ b/pkg/api/endpoints/util.go @@ -89,8 +89,7 @@ type addressKey struct { // any existing ready state. func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress { // use addressKey to distinguish between two endpoints that are identical addresses - // but may have come from different hosts, for attribution. For instance, Mesos - // assigns pods the node IP, but the pods are distinct. + // but may have come from different hosts, for attribution. key := addressKey{ip: addr.IP} if addr.TargetRef != nil { key.uid = addr.TargetRef.UID diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 63c91cc5cf7..632ef952f3d 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -223,27 +223,7 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping // these objects while we figure out a more comprehensive dependency injection story for the Kubelet. type Dependencies struct { - // TODO(mtaufen): KubeletBuilder: - // Mesos currently uses this as a hook to let them make their own call to - // let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with - // their own KubeletBootstrap. It's a useful hook. 
I need to think about what - // a nice home for it would be. There seems to be a trend, between this and - // the Options fields below, of providing hooks where you can add extra functionality - // to the Kubelet for your solution. Maybe we should centralize these sorts of things? - Builder Builder - - // TODO(mtaufen): ContainerRuntimeOptions and Options: - // Arrays of functions that can do arbitrary things to the Kubelet and the Runtime - // seem like a difficult path to trace when it's time to debug something. - // I'm leaving these fields here for now, but there is likely an easier-to-follow - // way to support their intended use cases. E.g. ContainerRuntimeOptions - // is used by Mesos to set an environment variable in containers which has - // some connection to their container GC. It seems that Mesos intends to use - // Options to add additional node conditions that are updated as part of the - // Kubelet lifecycle (see https://github.com/kubernetes/kubernetes/pull/21521). - // We should think about providing more explicit ways of doing these things. - ContainerRuntimeOptions []kubecontainer.Option - Options []Option + Options []Option // Injected Dependencies Auth server.AuthInterface diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index fefd8aac144..1b98f9189fb 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -179,9 +179,8 @@ func (kl *Kubelet) GetHostname() string { return kl.hostname } -// GetRuntime returns the current Runtime implementation in use by the kubelet. This func -// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos). -func (kl *Kubelet) GetRuntime() kubecontainer.Runtime { +// getRuntime returns the current Runtime implementation in use by the kubelet. 
+func (kl *Kubelet) getRuntime() kubecontainer.Runtime { return kl.containerRuntime } diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index d7e47de5576..73fddb56e50 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -65,7 +65,7 @@ func (nh *networkHost) GetKubeClient() clientset.Interface { } func (nh *networkHost) GetRuntime() kubecontainer.Runtime { - return nh.kubelet.GetRuntime() + return nh.kubelet.getRuntime() } func (nh *networkHost) SupportsLegacyFeatures() bool { @@ -88,7 +88,7 @@ type criNetworkHost struct { // Any network plugin invoked by a cri must implement NamespaceGetter // to talk directly to the runtime instead. func (c *criNetworkHost) GetNetNS(containerID string) (string, error) { - return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) + return c.kubelet.getRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) } // NoOpLegacyHost implements the network.LegacyHost interface for the remote @@ -106,7 +106,7 @@ func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface { return nil } -// GetRuntime always returns "nil" for 'NoOpLegacyHost' +// GetRuntime always returns "nil" for 'NoOpLegacyHost' func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime { return nil } @@ -188,7 +188,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) { // kubelet -> generic runtime -> runtime shim -> network plugin // docker/rkt non-cri implementations have a passthrough UpdatePodCIDR - if err := kl.GetRuntime().UpdatePodCIDR(cidr); err != nil { glog.Errorf("Failed to update pod CIDR: %v", err) return } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 6241f4ba340..7c94a30ef13 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -814,7 +814,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k if
runningPod != nil { p = *runningPod } else if status != nil { - p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status) + p = kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), status) } else { return fmt.Errorf("one of the two arguments must be non-nil: runningPod, status") } @@ -1231,10 +1231,8 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) } -// GetPhase returns the phase of a pod given its container info. -// This func is exported to simplify integration with 3rd party kubelet -// integrations like kubernetes-mesos. -func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { +// getPhase returns the phase of a pod given its container info. +func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { initialized := 0 pendingInitialization := 0 failedInitialization := 0 @@ -1364,7 +1362,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po // Assume info is ready to process spec := &pod.Spec allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) 
- s.Phase = GetPhase(spec, allStatus) + s.Phase = getPhase(spec, allStatus) kl.probeManager.UpdatePodStatus(pod.UID, s) s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index f313f80e056..66cde220d44 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -1845,7 +1845,7 @@ func TestPodPhaseWithRestartAlways(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, status, "[test %s]", test.test) } } @@ -1945,7 +1945,7 @@ func TestPodPhaseWithRestartNever(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, status, "[test %s]", test.test) } } @@ -2058,7 +2058,7 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, status, "[test %s]", test.test) } } diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 934c044a7e8..59ba3fc2e1d 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -1105,22 +1105,6 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { for _, port := range ss.Ports { for _, addr := range ss.Addresses { containerPort := port.Port - hostPort := port.Port - - // use endpoint annotations to recover the container port in a Mesos setup - // compare 
contrib/mesos/pkg/service/endpoints_controller.syncService - key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort) - mesosContainerPortString := endpoints.Annotations[key] - if mesosContainerPortString != "" { - mesosContainerPort, err := strconv.Atoi(mesosContainerPortString) - if err != nil { - continue - } - containerPort = int32(mesosContainerPort) - Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) - } - - // Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) if _, ok := m[addr.TargetRef.UID]; !ok { m[addr.TargetRef.UID] = make([]int, 0) } diff --git a/test/test_owners.csv b/test/test_owners.csv index d7c2d109dc0..170579118d5 100644 --- a/test/test_owners.csv +++ b/test/test_owners.csv @@ -248,9 +248,6 @@ Loadbalancing: L7 GCE should create ingress with given static-ip,eparis,1, Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1,network "Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1,node "MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1,node -Mesos applies slave attributes as labels,justinsb,1,apps -Mesos schedules pods annotated with roles on correct slaves,tallclair,1,apps -Mesos starts static pods on every node in the mesos cluster,lavalamp,1,apps MetricsGrabber should grab all metrics from API server.,gmarek,0,instrumentation MetricsGrabber should grab all metrics from a ControllerManager.,gmarek,0,instrumentation MetricsGrabber should grab all metrics from a Kubelet.,gmarek,0,instrumentation