Logging for service restart e2e clarity

commit 5dac1122b4
parent 1b05640cf8
@@ -380,6 +380,8 @@ func (e *EndpointController) syncService(key string) {
         }
     }
 
+    readyEps := 0
+    notReadyEps := 0
     for i := range pods {
         // TODO: Do we need to copy here?
         pod := &(*pods[i])
@@ -432,12 +434,14 @@ func (e *EndpointController) syncService(key string) {
                     Addresses: []api.EndpointAddress{epa},
                     Ports: []api.EndpointPort{epp},
                 })
+                readyEps++
             } else {
                 glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
                 subsets = append(subsets, api.EndpointSubset{
                     NotReadyAddresses: []api.EndpointAddress{epa},
                     Ports: []api.EndpointPort{epp},
                 })
+                notReadyEps++
             }
         }
     }
@@ -490,6 +494,7 @@ func (e *EndpointController) syncService(key string) {
         newEndpoints.Annotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
     }
 
+    glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, readyEps, notReadyEps)
     createEndpoints := len(currentEndpoints.ResourceVersion) == 0
     if createEndpoints {
         // No previous endpoints, create them
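The controller change above simply tallies ready and not-ready endpoints while the subsets are built, then reports both counts in a single V(4) line per sync. A minimal standalone sketch of that counting pattern, using a hypothetical pod type and plain log.Printf in place of glog:

package main

import "log"

// pod is a hypothetical stand-in for the pods the controller gets from its store.
type pod struct {
	Namespace, Name string
	Ready           bool
}

func main() {
	pods := []pod{{"default", "web-0", true}, {"default", "web-1", false}}

	readyEps, notReadyEps := 0, 0
	for _, p := range pods {
		if p.Ready {
			readyEps++
		} else {
			// Mirrors the controller's existing V(5) "Pod is out of service" message.
			log.Printf("Pod is out of service: %v/%v", p.Namespace, p.Name)
			notReadyEps++
		}
	}
	// One summary line per sync, like the new V(4) message in the diff above.
	log.Printf("Update endpoints, ready: %d not ready: %d", readyEps, notReadyEps)
}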
@@ -4071,14 +4071,17 @@ func RestartKubeProxy(host string) error {
         return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
     }
     // kubelet will restart the kube-proxy since it's running in a static pod
+    Logf("Killing kube-proxy on node %v", host)
     result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
     if err != nil || result.Code != 0 {
         LogSSHResult(result)
         return fmt.Errorf("couldn't restart kube-proxy: %v", err)
     }
     // wait for kube-proxy to come back up
+    sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
     err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-        result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, TestContext.Provider)
+        Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
+        result, err := SSH(sshCmd, host, TestContext.Provider)
         if err != nil {
             return false, err
         }
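RestartKubeProxy kills the static-pod kube-proxy over SSH and then polls until `pgrep kube-proxy | wc -l` reports a running process again. A self-contained sketch of that poll loop, with a hypothetical runOverSSH stub in place of the framework's SSH helper and a hand-rolled pollUntil in place of wait.Poll:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// runOverSSH is a hypothetical stand-in for the e2e framework's SSH helper;
// here it just pretends the process count is already back to 1.
func runOverSSH(cmd, host string) (string, error) {
	return "1", nil
}

// pollUntil mimics wait.Poll: run cond every interval until it returns true,
// returns an error, or the timeout expires.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ok, err := cond()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out after %v", timeout)
}

func main() {
	host, sshCmd := "10.0.0.1:22", "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
	err := pollUntil(5*time.Second, 60*time.Second, func() (bool, error) {
		out, err := runOverSSH(sshCmd, host)
		if err != nil {
			return false, err
		}
		n, err := strconv.Atoi(strings.TrimSpace(out))
		if err != nil {
			return false, nil // output not a number yet; keep polling
		}
		return n > 0, nil // kube-proxy is back once at least one process exists
	})
	fmt.Println("poll result:", err)
}

The fixed 5s interval and 60s budget mirror the arguments passed to wait.Poll in the hunk above.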
@@ -4125,6 +4128,7 @@ func sshRestartMaster() error {
     } else {
         command = "sudo /etc/init.d/kube-apiserver restart"
     }
+    Logf("Restarting master via ssh, running: %v", command)
     result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
     if err != nil || result.Code != 0 {
         LogSSHResult(result)
@@ -312,7 +312,7 @@ var _ = framework.KubeDescribe("Services", func() {
         framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
         framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
 
-        By("Restarting kube-proxy")
+        By(fmt.Sprintf("Restarting kube-proxy on %v", host))
         if err := framework.RestartKubeProxy(host); err != nil {
             framework.Failf("error restarting kube-proxy: %v", err)
         }
@@ -353,9 +353,11 @@ var _ = framework.KubeDescribe("Services", func() {
         framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
 
         // Restart apiserver
+        By("Restarting apiserver")
         if err := framework.RestartApiserver(c); err != nil {
             framework.Failf("error restarting apiserver: %v", err)
         }
+        By("Waiting for apiserver to come up by polling /healthz")
         if err := framework.WaitForApiserverUp(c); err != nil {
             framework.Failf("error while waiting for apiserver up: %v", err)
         }
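The new By step spells out what framework.WaitForApiserverUp is waiting for: the apiserver's /healthz endpoint to answer again. A rough sketch of such a health poll, assuming a hypothetical local healthz address; the real helper goes through the test's authenticated client rather than raw HTTP:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthz polls url until it answers with HTTP 200 or the timeout expires.
func waitForHealthz(url string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("/healthz not healthy after %v", timeout)
}

func main() {
	// Hypothetical local apiserver address; in the e2e framework the master
	// host and credentials come from the test context instead.
	err := waitForHealthz("http://127.0.0.1:8080/healthz", 5*time.Second, 30*time.Second)
	fmt.Println("healthz poll:", err)
}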