Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 12:15:52 +00:00
Merge pull request #60993 from MrHohn/e2e-restart-apiserver-refine-followup

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

[e2e service] Move apiserver restart validation logic into util

**What this PR does / why we need it**: Follow-up to #60906. On GKE the apiserver pod is not visible to Kubernetes, so the test was failing there. This PR bakes the restart validation logic into the util function instead, so that it can be environment-aware.

**Which issue(s) this PR fixes**: Fixes #60761

**Special notes for your reviewer**: Sorry for the noise.

/assign @rramkumar1 @bowei
cc @krzyzacy

**Release note**:
```release-note
NONE
```
This commit is contained in: commit 9ad5ea2d61
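For context, a minimal sketch of how a test drives the refactored helper after this change. The call shapes (`framework.RestartApiserver(cs)`, `framework.WaitForApiserverUp(cs)`) come from the diff below; the wrapper function and its name are illustrative only.

```go
package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// restartAndWait is a hypothetical caller of the refactored helpers.
// After this PR, RestartApiserver takes the full clientset and decides
// per provider how to restart the master and how to validate it: on
// GCE/AWS it checks that the kube-apiserver pod's restart count
// increased; on GKE (where that pod is invisible) it triggers a
// same-version master upgrade instead.
func restartAndWait(cs clientset.Interface) error {
	if err := framework.RestartApiserver(cs); err != nil {
		return err
	}
	// Independently poll /healthz until the apiserver answers again.
	return framework.WaitForApiserverUp(cs)
}
```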
Changes to the e2e framework utilities:

```diff
@@ -3916,17 +3916,24 @@ func WaitForKubeletUp(host string) error {
 	return fmt.Errorf("waiting for kubelet timed out")
 }
 
-func RestartApiserver(c discovery.ServerVersionInterface) error {
+func RestartApiserver(cs clientset.Interface) error {
 	// TODO: Make it work for all providers.
 	if !ProviderIs("gce", "gke", "aws") {
 		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
 	}
 	if ProviderIs("gce", "aws") {
-		return sshRestartMaster()
+		initialRestartCount, err := getApiserverRestartCount(cs)
+		if err != nil {
+			return fmt.Errorf("failed to get apiserver's restart count: %v", err)
+		}
+		if err := sshRestartMaster(); err != nil {
+			return fmt.Errorf("failed to restart apiserver: %v", err)
+		}
+		return waitForApiserverRestarted(cs, initialRestartCount)
 	}
 	// GKE doesn't allow ssh access, so use a same-version master
 	// upgrade to teardown/recreate master.
-	v, err := c.ServerVersion()
+	v, err := cs.Discovery().ServerVersion()
 	if err != nil {
 		return err
 	}
```
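Note the widened signature: the old `RestartApiserver` only needed a `discovery.ServerVersionInterface` to read the server version for the GKE upgrade path. The GCE/AWS path now also has to list pods to read restart counts, so the function takes a full `clientset.Interface` and derives the discovery client via `cs.Discovery()`.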
```diff
@@ -3962,10 +3969,10 @@ func WaitForApiserverUp(c clientset.Interface) error {
 	return fmt.Errorf("waiting for apiserver timed out")
 }
 
-// WaitForApiserverRestarted waits until apiserver's restart count increased.
-func WaitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error {
+// waitForApiserverRestarted waits until apiserver's restart count increased.
+func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error {
 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		restartCount, err := GetApiserverRestartCount(c)
+		restartCount, err := getApiserverRestartCount(c)
 		if err != nil {
 			Logf("Failed to get apiserver's restart count: %v", err)
 			continue
@@ -3979,7 +3986,7 @@ func WaitForApiserverRestarted(c clientset.Interface, initialRestartCount int32)
 	return fmt.Errorf("timed out waiting for apiserver to be restarted")
 }
 
-func GetApiserverRestartCount(c clientset.Interface) (int32, error) {
+func getApiserverRestartCount(c clientset.Interface) (int32, error) {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"}))
 	listOpts := metav1.ListOptions{LabelSelector: label.String()}
 	pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
```
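The last hunk is truncated after the `List` call. For readability, here is a self-contained sketch of how such a counter plausibly finishes, reading `RestartCount` from the pod's container statuses; everything after the `List` call is an assumption, not the actual code.

```go
package framework

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
)

// getApiserverRestartCount, reconstructed: the selector, namespace and
// List call are taken from the diff; the aggregation below is a guess.
func getApiserverRestartCount(c clientset.Interface) (int32, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"}))
	listOpts := metav1.ListOptions{LabelSelector: label.String()}
	pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
	if err != nil {
		return -1, err
	}
	if len(pods.Items) != 1 {
		return -1, fmt.Errorf("unexpected number of apiserver pods: %d", len(pods.Items))
	}
	// The static pod's mirror pod reports kubelet-driven restarts here.
	for _, s := range pods.Items[0].Status.ContainerStatuses {
		if s.Name == "kube-apiserver" {
			return s.RestartCount, nil
		}
	}
	return -1, fmt.Errorf("failed to find kube-apiserver container in pod %s", pods.Items[0].Name)
}
```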
Changes to the Services e2e test:

```diff
@@ -427,16 +427,10 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
 
 		// Restart apiserver
-		initialRestartCount, err := framework.GetApiserverRestartCount(cs)
-		Expect(err).NotTo(HaveOccurred(), "failed to get apiserver's restart count")
 		By("Restarting apiserver")
-		if err := framework.RestartApiserver(cs.Discovery()); err != nil {
+		if err := framework.RestartApiserver(cs); err != nil {
 			framework.Failf("error restarting apiserver: %v", err)
 		}
-		By("Waiting for apiserver to be restarted")
-		if err := framework.WaitForApiserverRestarted(cs, initialRestartCount); err != nil {
-			framework.Failf("error while waiting for apiserver to be restarted: %v", err)
-		}
 		By("Waiting for apiserver to come up by polling /healthz")
 		if err := framework.WaitForApiserverUp(cs); err != nil {
 			framework.Failf("error while waiting for apiserver up: %v", err)
```
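Net effect in the test: the restart-count bookkeeping (`GetApiserverRestartCount`, `WaitForApiserverRestarted`) disappears from the caller and moves inside `RestartApiserver`, which is exactly what lets the helper skip the pod-based validation on GKE, where the master is recreated by an upgrade and no apiserver pod is visible.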