Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 06:54:01 +00:00)
Merge pull request #31923 from bprashanth/logging
Automatic merge from submit-queue.

Add logging and fix a test race. Fixes https://github.com/kubernetes/kubernetes/issues/31552 and adds clarity for https://github.com/kubernetes/kubernetes/issues/29449. Janet for the job e2e, Girish for services. P1 for flake.
Commit 88c7e25f72
@@ -380,6 +380,8 @@ func (e *EndpointController) syncService(key string) {
 		}
 	}
 
+	readyEps := 0
+	notReadyEps := 0
 	for i := range pods {
 		// TODO: Do we need to copy here?
 		pod := &(*pods[i])
@@ -432,12 +434,14 @@ func (e *EndpointController) syncService(key string) {
 				Addresses: []api.EndpointAddress{epa},
 				Ports:     []api.EndpointPort{epp},
 			})
+			readyEps++
 		} else {
 			glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
 			subsets = append(subsets, api.EndpointSubset{
 				NotReadyAddresses: []api.EndpointAddress{epa},
 				Ports:             []api.EndpointPort{epp},
 			})
+			notReadyEps++
 			}
 		}
 	}
@@ -490,6 +494,7 @@ func (e *EndpointController) syncService(key string) {
 		newEndpoints.Annotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
 	}
 
+	glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, readyEps, notReadyEps)
 	createEndpoints := len(currentEndpoints.ResourceVersion) == 0
 	if createEndpoints {
 		// No previous endpoints, create them
@@ -2159,7 +2159,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 			result.Fail(err)
 			return
 		}
-		glog.V(4).Infof("Determined pod ip after infra change: %q: %q", format.Pod(pod), podIP)
+		glog.Infof("Determined pod ip after infra change: %q: %q", format.Pod(pod), podIP)
 	}
 }
 
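Both logging changes above hinge on glog verbosity: a glog.V(n) line is emitted only when the process runs with -v=n or higher, while glog.Infof prints at default verbosity, which is why promoting the SyncPod line makes the pod-IP determination visible without extra flags. A minimal, self-contained sketch of that gating, using the same github.com/golang/glog package the diffs import (the counts and names below are hypothetical, not from this PR):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Set("logtostderr", "true") // send glog output to stderr for the demo
	flag.Parse()                    // glog's -v flag is read from the command line

	readyEps, notReadyEps := 3, 1 // hypothetical counts
	// Emitted only when run with -v=4 or higher, like the endpoints log above.
	glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d",
		"default", "my-svc", readyEps, notReadyEps)
	// Emitted at default verbosity, like the promoted SyncPod line.
	glog.Infof("Determined pod ip after infra change")
	glog.Flush()
}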
@@ -4292,14 +4292,17 @@ func RestartKubeProxy(host string) error {
 		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
 	}
 	// kubelet will restart the kube-proxy since it's running in a static pod
+	Logf("Killing kube-proxy on node %v", host)
 	result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		LogSSHResult(result)
 		return fmt.Errorf("couldn't restart kube-proxy: %v", err)
 	}
 	// wait for kube-proxy to come back up
+	sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
 	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-		result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, TestContext.Provider)
+		Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
+		result, err := SSH(sshCmd, host, TestContext.Provider)
 		if err != nil {
 			return false, err
 		}
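The restart helper relies on wait.Poll to retry the pgrep check every 5 seconds for up to a minute, using a condition with the signature func() (bool, error). For readers unfamiliar with the pattern, here is a self-contained standard-library sketch of the same poll-until-done loop (pollUntil is a hypothetical stand-in, not the framework's implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries cond every interval until it reports done, returns an
// error, or the timeout elapses, mirroring wait.Poll's condition contract.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	// Stand-in for "pgrep kube-proxy | wc -l" eventually returning a nonzero count.
	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Printf("err=%v after %d attempts\n", err, attempts)
}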
@@ -4346,6 +4349,7 @@ func sshRestartMaster() error {
 	} else {
 		command = "sudo /etc/init.d/kube-apiserver restart"
 	}
+	Logf("Restarting master via ssh, running: %v", command)
 	result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		LogSSHResult(result)
@@ -275,8 +275,11 @@ func waitForJobReplaced(c *client.Client, ns, previousJobName string) error {
 		if err != nil {
 			return false, err
 		}
-		if len(jobs.Items) != 1 {
-			return false, fmt.Errorf("More than one job is running")
+		if len(jobs.Items) > 1 {
+			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+		} else if len(jobs.Items) == 0 {
+			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
+			return false, nil
 		}
 		return jobs.Items[0].Name != previousJobName, nil
 	})
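The race fix is in this polling predicate: previously any count other than exactly one job was a hard error, so the window where the old job is already gone but its replacement has not yet been created failed the test. The fix treats zero jobs as a transient state worth another poll and only more than one as a real failure. A simplified, self-contained sketch of the fixed predicate (identifiers hypothetical):

package main

import "fmt"

// jobReplaced mirrors the fixed logic: >1 jobs is a hard failure, 0 jobs is
// a transient state to retry, and exactly 1 job is compared against the
// name of the job being replaced.
func jobReplaced(jobNames []string, previous string) (bool, error) {
	if len(jobNames) > 1 {
		return false, fmt.Errorf("more than one job is running: %v", jobNames)
	} else if len(jobNames) == 0 {
		// The old job can disappear before its replacement shows up.
		return false, nil
	}
	return jobNames[0] != previous, nil
}

func main() {
	fmt.Println(jobReplaced(nil, "job-a"))               // false <nil>: keep polling
	fmt.Println(jobReplaced([]string{"job-b"}, "job-a")) // true <nil>: replaced
}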
@@ -312,7 +312,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
 		framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
 
-		By("Restarting kube-proxy")
+		By(fmt.Sprintf("Restarting kube-proxy on %v", host))
 		if err := framework.RestartKubeProxy(host); err != nil {
 			framework.Failf("error restarting kube-proxy: %v", err)
 		}
@@ -353,9 +353,11 @@ var _ = framework.KubeDescribe("Services", func() {
 		framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
 
-		// Restart apiserver
+		By("Restarting apiserver")
 		if err := framework.RestartApiserver(c); err != nil {
 			framework.Failf("error restarting apiserver: %v", err)
 		}
+		By("Waiting for apiserver to come up by polling /healthz")
 		if err := framework.WaitForApiserverUp(c); err != nil {
 			framework.Failf("error while waiting for apiserver up: %v", err)
 		}
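The two service-test changes only refine Ginkgo step descriptions: By() records a step that appears in spec and failure output, so including the host pinpoints which node's kube-proxy was being restarted when a flake occurs. A minimal sketch of the pattern using the onsi/ginkgo and onsi/gomega packages the e2e framework builds on (the test scaffolding and host value below are assumed, not from this PR):

package e2e_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestServices(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Services sketch")
}

var _ = Describe("Services", func() {
	It("labels steps with the node under test", func() {
		host := "10.240.0.5" // hypothetical node address
		// A By() step carrying the host makes failure output point at the node.
		By(fmt.Sprintf("Restarting kube-proxy on %v", host))
		Expect(host).NotTo(BeEmpty())
	})
})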