mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-29 22:46:12 +00:00
Add a retry loop to KubeletManagerEtcHosts flake
Note: this still makes the test fail if a retry occurs, but will give us more information regarding whether or not the test flake could be occurring due to delay in mounting of /etc/hosts.
This commit is contained in:
parent
617fa91264
commit
d61d3e48ee
@ -18,17 +18,19 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
. "github.com/onsi/ginkgo"
|
. "github.com/onsi/ginkgo"
|
||||||
api "k8s.io/kubernetes/pkg/api"
|
api "k8s.io/kubernetes/pkg/api"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
kubeletEtcHostsImageName = "gcr.io/google_containers/netexec:1.7"
|
etcHostsImageName = "gcr.io/google_containers/netexec:1.7"
|
||||||
kubeletEtcHostsPodName = "test-pod"
|
etcHostsPodName = "test-pod"
|
||||||
kubeletEtcHostsHostNetworkPodName = "test-host-network-pod"
|
etcHostsHostNetworkPodName = "test-host-network-pod"
|
||||||
etcHostsPartialContent = "# Kubernetes-managed hosts file."
|
etcHostsPartialContent = "# Kubernetes-managed hosts file."
|
||||||
)
|
)
|
||||||
|
|
||||||
type KubeletManagedHostConfig struct {
|
type KubeletManagedHostConfig struct {
|
||||||
@ -54,20 +56,15 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
|
|||||||
|
|
||||||
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
|
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
|
||||||
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
|
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
|
||||||
stdout := config.getEtcHostsContent(kubeletEtcHostsPodName, "busybox-1")
|
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
|
||||||
assertEtcHostsIsKubeletManaged(stdout)
|
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
|
||||||
stdout = config.getEtcHostsContent(kubeletEtcHostsPodName, "busybox-2")
|
|
||||||
assertEtcHostsIsKubeletManaged(stdout)
|
|
||||||
|
|
||||||
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
|
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
|
||||||
stdout = config.getEtcHostsContent(kubeletEtcHostsPodName, "busybox-3")
|
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
|
||||||
assertEtcHostsIsNotKubeletManaged(stdout)
|
|
||||||
|
|
||||||
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
|
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
|
||||||
stdout = config.getEtcHostsContent(kubeletEtcHostsHostNetworkPodName, "busybox-1")
|
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
|
||||||
assertEtcHostsIsNotKubeletManaged(stdout)
|
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
|
||||||
stdout = config.getEtcHostsContent(kubeletEtcHostsHostNetworkPodName, "busybox-2")
|
|
||||||
assertEtcHostsIsNotKubeletManaged(stdout)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *KubeletManagedHostConfig) setup() {
|
func (config *KubeletManagedHostConfig) setup() {
|
||||||
@ -79,26 +76,64 @@ func (config *KubeletManagedHostConfig) setup() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
|
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
|
||||||
podSpec := config.createPodSpec(kubeletEtcHostsPodName)
|
podSpec := config.createPodSpec(etcHostsPodName)
|
||||||
config.pod = config.f.PodClient().CreateSync(podSpec)
|
config.pod = config.f.PodClient().CreateSync(podSpec)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
|
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
|
||||||
podSpec := config.createPodSpecWithHostNetwork(kubeletEtcHostsHostNetworkPodName)
|
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
|
||||||
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
|
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertEtcHostsIsKubeletManaged(etcHostsContent string) {
|
func assertManagedStatus(
|
||||||
isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
|
config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
|
||||||
if !isKubeletManaged {
|
// See https://github.com/kubernetes/kubernetes/issues/27023
|
||||||
framework.Failf("/etc/hosts file should be kubelet managed, but is not: %q", etcHostsContent)
|
//
|
||||||
}
|
// Retry until timeout for the right contents of /etc/hosts to show
|
||||||
}
|
// up. There may be a low probability race here. We still fail the
|
||||||
|
// test if retry was necessary, but at least we will know whether or
|
||||||
|
// not it resolves or seems to be a permanent condition.
|
||||||
|
//
|
||||||
|
// If /etc/hosts is properly mounted, then this will succeed
|
||||||
|
// immediately.
|
||||||
|
const retryTimeout = 30 * time.Second
|
||||||
|
|
||||||
func assertEtcHostsIsNotKubeletManaged(etcHostsContent string) {
|
retryCount := 0
|
||||||
isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
|
etcHostsContent := ""
|
||||||
if isKubeletManaged {
|
matched := false
|
||||||
framework.Failf("/etc/hosts file should not be kubelet managed, but is: %q", etcHostsContent)
|
|
||||||
|
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
|
||||||
|
etcHostsContent = config.getEtcHostsContent(podName, name)
|
||||||
|
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
|
||||||
|
|
||||||
|
if expectedIsManaged == isManaged {
|
||||||
|
matched = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.Errorf(
|
||||||
|
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
|
||||||
|
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
|
||||||
|
|
||||||
|
retryCount++
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
if retryCount > 0 {
|
||||||
|
if matched {
|
||||||
|
conditionText := "should"
|
||||||
|
if !expectedIsManaged {
|
||||||
|
conditionText = "should not"
|
||||||
|
}
|
||||||
|
|
||||||
|
framework.Failf(
|
||||||
|
"/etc/hosts file %s be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
|
||||||
|
conditionText, name, retryCount, etcHostsContent)
|
||||||
|
} else {
|
||||||
|
framework.Failf(
|
||||||
|
"had to retry %d times to get matching content in /etc/hosts (name: %s)",
|
||||||
|
retryCount, name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,7 +150,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
|
|||||||
Containers: []api.Container{
|
Containers: []api.Container{
|
||||||
{
|
{
|
||||||
Name: "busybox-1",
|
Name: "busybox-1",
|
||||||
Image: kubeletEtcHostsImageName,
|
Image: etcHostsImageName,
|
||||||
ImagePullPolicy: api.PullIfNotPresent,
|
ImagePullPolicy: api.PullIfNotPresent,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
"sleep",
|
"sleep",
|
||||||
@ -124,7 +159,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "busybox-2",
|
Name: "busybox-2",
|
||||||
Image: kubeletEtcHostsImageName,
|
Image: etcHostsImageName,
|
||||||
ImagePullPolicy: api.PullIfNotPresent,
|
ImagePullPolicy: api.PullIfNotPresent,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
"sleep",
|
"sleep",
|
||||||
@ -133,7 +168,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "busybox-3",
|
Name: "busybox-3",
|
||||||
Image: kubeletEtcHostsImageName,
|
Image: etcHostsImageName,
|
||||||
ImagePullPolicy: api.PullIfNotPresent,
|
ImagePullPolicy: api.PullIfNotPresent,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
"sleep",
|
"sleep",
|
||||||
@ -174,7 +209,7 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
|
|||||||
Containers: []api.Container{
|
Containers: []api.Container{
|
||||||
{
|
{
|
||||||
Name: "busybox-1",
|
Name: "busybox-1",
|
||||||
Image: kubeletEtcHostsImageName,
|
Image: etcHostsImageName,
|
||||||
ImagePullPolicy: api.PullIfNotPresent,
|
ImagePullPolicy: api.PullIfNotPresent,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
"sleep",
|
"sleep",
|
||||||
@ -183,7 +218,7 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "busybox-2",
|
Name: "busybox-2",
|
||||||
Image: kubeletEtcHostsImageName,
|
Image: etcHostsImageName,
|
||||||
ImagePullPolicy: api.PullIfNotPresent,
|
ImagePullPolicy: api.PullIfNotPresent,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
"sleep",
|
"sleep",
|
||||||
|
Loading…
Reference in New Issue
Block a user