Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-05 18:24:07 +00:00
Merge pull request #36479 from Random-Liu/node-e2e-node-name
Automatic merge from submit-queue

Node Conformance & E2E: Get node name from node object.

This PR changes the node e2e test framework to get the node name from the apiserver instead of from test flags.

When a user tried out the node conformance test, they found that it does not work properly if the kubelet is started with `hostname-override`. The reason is that the node conformance test was using [the default node name - `os.Hostname`](https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/e2e_node_suite_test.go#L124), which may differ from the `hostname-override` value. Because of that mismatch, test pods are never scheduled onto the node, and the test eventually times out.

We could expose a flag from the node conformance test and let users set the node name themselves whenever they run the kubelet with `hostname-override`. However, having the framework automatically detect the node name from the apiserver is more user-friendly.

/cc @kubernetes/sig-node

This PR 1) only changes the node e2e test framework; and 2) fixes a problem in the node conformance test, which is a 1.5 feature. @saad-ali Can we have this in 1.5?
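For readers who want the core idea in isolation: the sketch below contrasts `os.Hostname()` with the name on the node object when the kubelet runs with `hostname-override`. It is a minimal sketch using the modern client-go API; the actual diff below uses the release-1.5 internal clientset, whose calls differ slightly, and the kubeconfig handling here is an illustrative assumption, not part of the PR.

```go
// Sketch: detect the node name from the apiserver instead of os.Hostname().
// Assumes a single-node test cluster and a KUBECONFIG environment variable.
package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	if len(nodes.Items) == 0 {
		panic("no nodes registered with the apiserver")
	}
	hostname, _ := os.Hostname()
	// With kubelet --hostname-override, the node object's name and the local
	// hostname can differ; scheduling pods against the hostname would leave
	// them Pending forever, which is exactly the bug this PR fixes.
	fmt.Printf("os.Hostname()=%q, node object name=%q\n", hostname, nodes.Items[0].Name)
}
```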
This commit is contained in commit 1bc5b822cd.
```diff
@@ -122,33 +122,37 @@ func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout
 }
 
 // mungeSpec apply test-suite specific transformations to the pod spec.
 func (c *PodClient) mungeSpec(pod *api.Pod) {
-	if TestContext.NodeName != "" {
-		Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
-		pod.Spec.NodeName = TestContext.NodeName
+	if !TestContext.NodeE2E {
+		return
 	}
+
+	Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
+	pod.Spec.NodeName = TestContext.NodeName
 	// Node e2e does not support the default DNSClusterFirst policy. Set
 	// the policy to DNSDefault, which is configured per node.
 	pod.Spec.DNSPolicy = api.DNSDefault
 
+	// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
+	// we should not munge ImagePullPolicy for cluster e2e pods.
 	if !TestContext.PrepullImages {
 		return
 	}
 	// If prepull is enabled, munge the container spec to make sure the images are not pulled
 	// during the test.
 	for i := range pod.Spec.Containers {
 		c := &pod.Spec.Containers[i]
 		if c.ImagePullPolicy == api.PullAlways {
 			// If the image pull policy is PullAlways, the image doesn't need to be in
 			// the white list or pre-pulled, because the image is expected to be pulled
 			// in the test anyway.
 			continue
 		}
 		// If the image policy is not PullAlways, the image must be in the white list and
 		// pre-pulled.
 		Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
 		// Do not pull images during the tests because the images in white list should have
 		// been prepulled.
 		c.ImagePullPolicy = api.PullNever
 	}
 }
```
```diff
@@ -100,7 +100,9 @@ type TestContextType struct {
 
 // NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
 type NodeTestContextType struct {
-	// Name of the node to run tests on (node e2e suite only).
+	// NodeE2E indicates whether it is running node e2e.
+	NodeE2E bool
+	// Name of the node to run tests on.
 	NodeName string
 	// NodeConformance indicates whether the test is running in node conformance mode.
 	NodeConformance bool
```
```diff
@@ -208,7 +210,9 @@ func RegisterClusterFlags() {
 
 // Register flags specific to the node e2e test suite.
 func RegisterNodeFlags() {
-	flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on (node e2e suite only).")
+	// Mark the test as node e2e when node flags are registered.
+	TestContext.NodeE2E = true
+	flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
 	// TODO(random-liu): Move kubelet start logic out of the test.
 	// TODO(random-liu): Move log fetch logic out of the test.
 	// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
```
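Setting `TestContext.NodeE2E` inside `RegisterNodeFlags` is enough to distinguish the suites because only node e2e binaries call it before `flag.Parse()`; cluster e2e binaries never do, so they keep the zero value `false`. A hypothetical suite entry point illustrating the registration order (the `TestMain` wiring here is assumed for illustration, not quoted from the PR):

```go
package e2enode

import (
	"flag"
	"os"
	"testing"

	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical node e2e suite entry point: registering the node flags has
// the side effect of marking the run as node e2e before flags are parsed.
func TestMain(m *testing.M) {
	framework.RegisterNodeFlags() // side effect: TestContext.NodeE2E = true
	flag.Parse()
	os.Exit(m.Run())
}
```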
```diff
@@ -1789,7 +1789,7 @@ func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
 type ClientConfigGetter func() (*restclient.Config, error)
 
 func LoadConfig() (*restclient.Config, error) {
-	if TestContext.NodeName != "" {
+	if TestContext.NodeE2E {
 		// This is a node e2e test, apply the node e2e configuration
 		return &restclient.Config{Host: TestContext.Host}, nil
 	}
```
```diff
@@ -119,13 +119,6 @@ func TestE2eNode(t *testing.T) {
 
 // Setup the kubelet on the node
 var _ = SynchronizedBeforeSuite(func() []byte {
-	// Initialize node name here, so that the following code can get right node name.
-	if framework.TestContext.NodeName == "" {
-		hostname, err := os.Hostname()
-		Expect(err).NotTo(HaveOccurred(), "should be able to get node name")
-		framework.TestContext.NodeName = hostname
-	}
-
 	// Run system validation test.
 	Expect(validateSystem()).To(Succeed(), "system validation")
 
```
```diff
@@ -166,6 +159,9 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	// The node test context is updated in the first function, update it on every test node.
 	err := json.Unmarshal(data, &framework.TestContext.NodeTestContextType)
 	Expect(err).NotTo(HaveOccurred(), "should be able to deserialize node test context.")
+
+	// update test context with node configuration.
+	Expect(updateTestContext()).To(Succeed(), "update test context with node config.")
 })
 
 // Tear down the kubelet on the node
```
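This hunk relies on Ginkgo's `SynchronizedBeforeSuite` contract: the first function runs once on parallel process 1 and returns a `[]byte`, and the second function runs on every process with that data, which is why the node test context must be deserialized and `updateTestContext` re-run on each test node. A minimal standalone sketch of that contract, where `sharedState` is a hypothetical stand-in for `NodeTestContextType`:

```go
package e2enode

import (
	"encoding/json"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// sharedState stands in for NodeTestContextType; illustrative only.
type sharedState struct {
	NodeName string
}

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Runs exactly once, on parallel process 1: compute and serialize state.
	data, err := json.Marshal(sharedState{NodeName: "from-apiserver"})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return data
}, func(data []byte) {
	// Runs on every parallel process, including process 1: restore the state.
	var s sharedState
	gomega.Expect(json.Unmarshal(data, &s)).To(gomega.Succeed())
})
```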
```diff
@@ -217,24 +213,60 @@ func waitForNodeReady() {
 		// nodeReadyPollInterval is the interval to check node ready.
 		nodeReadyPollInterval = 1 * time.Second
 	)
-	config, err := framework.LoadConfig()
-	Expect(err).NotTo(HaveOccurred())
-	client, err := clientset.NewForConfig(config)
-	Expect(err).NotTo(HaveOccurred())
+	client, err := getAPIServerClient()
+	Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
 	Eventually(func() error {
-		nodes, err := client.Nodes().List(api.ListOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		if nodes == nil {
-			return fmt.Errorf("the node list is nil.")
-		}
-		Expect(len(nodes.Items) > 1).NotTo(BeTrue())
-		if len(nodes.Items) == 0 {
-			return fmt.Errorf("empty node list: %+v", nodes)
-		}
-		node := nodes.Items[0]
-		if !api.IsNodeReady(&node) {
+		node, err := getNode(client)
+		if err != nil {
+			return fmt.Errorf("failed to get node: %v", err)
+		}
+		if !api.IsNodeReady(node) {
 			return fmt.Errorf("node is not ready: %+v", node)
 		}
 		return nil
 	}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
 }
+
+// updateTestContext updates the test context with the node name.
+// TODO(random-liu): Using dynamic kubelet configuration feature to
+// update test context with node configuration.
+func updateTestContext() error {
+	client, err := getAPIServerClient()
+	if err != nil {
+		return fmt.Errorf("failed to get apiserver client: %v", err)
+	}
+	node, err := getNode(client)
+	if err != nil {
+		return fmt.Errorf("failed to get node: %v", err)
+	}
+	// Initialize the node name
+	framework.TestContext.NodeName = node.Name
+	return nil
+}
+
+// getNode gets node object from the apiserver.
+func getNode(c *clientset.Clientset) (*api.Node, error) {
+	nodes, err := c.Nodes().List(api.ListOptions{})
+	Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
+	if nodes == nil {
+		return nil, fmt.Errorf("the node list is nil.")
+	}
+	Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "should not be more than 1 nodes.")
+	if len(nodes.Items) == 0 {
+		return nil, fmt.Errorf("empty node list: %+v", nodes)
+	}
+	return &nodes.Items[0], nil
+}
+
+// getAPIServerClient gets a apiserver client.
+func getAPIServerClient() (*clientset.Clientset, error) {
+	config, err := framework.LoadConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to load config: %v", err)
+	}
+	client, err := clientset.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create client: %v", err)
+	}
+	return client, nil
+}
```
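The same readiness loop can also be written without the Gomega `Eventually` DSL. A sketch using the modern `wait` helpers and the versioned API, under the same single-node assumption (the PR itself uses the 1.5-era internal clientset and `api.IsNodeReady`; `waitForReady` is a hypothetical name):

```go
package e2enode

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForReady polls until the single test node reports Ready, tolerating
// transient list failures the same way the Eventually loop above does.
func waitForReady(client kubernetes.Interface, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		if err != nil || len(nodes.Items) == 0 {
			return false, nil // transient error or node not registered yet; retry
		}
		for _, cond := range nodes.Items[0].Status.Conditions {
			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
```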
```diff
@@ -199,7 +199,6 @@ func (e *E2EServices) startKubelet() (*server, error) {
 		"--address", "0.0.0.0",
 		"--port", kubeletPort,
 		"--read-only-port", kubeletReadOnlyPort,
-		"--hostname-override", framework.TestContext.NodeName, // Required because hostname is inconsistent across hosts
 		"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
 		"--allow-privileged", "true",
 		"--serialize-image-pulls", "false",
```
```diff
@@ -213,7 +212,9 @@ func (e *E2EServices) startKubelet() (*server, error) {
 
 		"--experimental-mounter-path", framework.TestContext.MounterPath,
 	)
+	if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
+		cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
+	}
 	if framework.TestContext.EnableCRI {
 		cmdArgs = append(cmdArgs, "--experimental-cri", "true") // Whether to use experimental cri integration.
 	}
```