mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-02 16:29:21 +00:00
Merge pull request #37303 from krousey/e2eutil
Automatic merge from submit-queue Guard the ready replica checking by server version I fixed replica readiness checking for 1.4->1.5 upgrades by using a field that only exists in versions >=1.4.0 in #36924. This fixed a lot of issues in 1.4->1.5 upgrade testing, but did not fix 1.3->1.5 upgrade tests. I've disabled replica checking for 1.3 masters as the old logic was broken anyway. This will not affect the 1.3 CI tests — only the 1.3 -> {1.4, 1.5} upgrade tests. https://k8s-gubernator.appspot.com/build/kubernetes-jenkins/logs/kubernetes-e2e-gke-container_vm-1.3-container_vm-1.5-upgrade-cluster-new/330?log is an example of this breakage. These are the tell-tale logs: ```console Nov 22 09:40:50.469: INFO: 11 / 11 pods in namespace 'kube-system' are running and ready (506 seconds elapsed) Nov 22 09:40:50.469: INFO: expected 5 pod replicas in namespace 'kube-system', 0 are Running and Ready. Nov 22 09:40:50.469: INFO: POD NODE PHASE GRACE CONDITIONS ```
This commit is contained in:
commit
e69b497e07
@ -479,6 +479,8 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var ReadyReplicaVersion = version.MustParse("v1.4.0")
|
||||||
|
|
||||||
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
|
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
|
||||||
// namespace ns are either running and ready, or failed but controlled by a
|
// namespace ns are either running and ready, or failed but controlled by a
|
||||||
// controller. Also, it ensures that at least minPods are running and
|
// controller. Also, it ensures that at least minPods are running and
|
||||||
@ -493,6 +495,14 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
|
|||||||
// and some in Success. This is to allow the client to decide if "Success"
|
// and some in Success. This is to allow the client to decide if "Success"
|
||||||
// means "Ready" or not.
|
// means "Ready" or not.
|
||||||
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
|
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
|
||||||
|
|
||||||
|
// This can be removed when we no longer have 1.3 servers running with upgrade tests.
|
||||||
|
hasReadyReplicas, err := ServerVersionGTE(ReadyReplicaVersion, c.Discovery())
|
||||||
|
if err != nil {
|
||||||
|
Logf("Error getting the server version: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
|
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
||||||
@ -514,24 +524,26 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
|
|||||||
// checked.
|
// checked.
|
||||||
replicas, replicaOk := int32(0), int32(0)
|
replicas, replicaOk := int32(0), int32(0)
|
||||||
|
|
||||||
rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{})
|
if hasReadyReplicas {
|
||||||
if err != nil {
|
rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{})
|
||||||
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
|
if err != nil {
|
||||||
return false, nil
|
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
|
||||||
}
|
return false, nil
|
||||||
for _, rc := range rcList.Items {
|
}
|
||||||
replicas += rc.Spec.Replicas
|
for _, rc := range rcList.Items {
|
||||||
replicaOk += rc.Status.ReadyReplicas
|
replicas += rc.Spec.Replicas
|
||||||
}
|
replicaOk += rc.Status.ReadyReplicas
|
||||||
|
}
|
||||||
|
|
||||||
rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
|
rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Logf("Error getting replication sets in namespace %q: %v", ns, err)
|
Logf("Error getting replication sets in namespace %q: %v", ns, err)
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
for _, rs := range rsList.Items {
|
for _, rs := range rsList.Items {
|
||||||
replicas += rs.Spec.Replicas
|
replicas += rs.Spec.Replicas
|
||||||
replicaOk += rs.Status.ReadyReplicas
|
replicaOk += rs.Status.ReadyReplicas
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
podList, err := c.Core().Pods(ns).List(api.ListOptions{})
|
podList, err := c.Core().Pods(ns).List(api.ListOptions{})
|
||||||
@ -563,7 +575,9 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
|
|||||||
|
|
||||||
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
|
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
|
||||||
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
|
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
|
||||||
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
|
if hasReadyReplicas {
|
||||||
|
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
|
||||||
|
}
|
||||||
|
|
||||||
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
|
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
|
||||||
return true, nil
|
return true, nil
|
||||||
|
Loading…
Reference in New Issue
Block a user