Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 07:47:56 +00:00)
Merge pull request #105507 from claudiubelu/tests/refactor-daemonset
tests: Refactors daemonset utils into framework
This commit is contained in commit 20ff5381ce.
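For orientation before the diff itself: the sketch below is not part of the commit; it shows how a test can consume the e2edaemonset helpers this refactor introduces. Only the e2edaemonset function names and signatures come from the commit; the package name, image argument, and poll interval/timeout are illustrative assumptions standing in for whatever the calling test already defines.

// Sketch only: the helper calls mirror the call sites updated in this commit;
// everything else (package name, image, retry values) is an assumption.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
)

func runDaemonSetSmokeCheck(f *framework.Framework, image string) error {
	label := map[string]string{"daemonset-name": "example-ds"}

	// Build the object with the shared fixture instead of a hand-written
	// appsv1.DaemonSet literal.
	ds := e2edaemonset.NewDaemonSet("example-ds", image, label, nil, nil,
		[]v1.ContainerPort{{ContainerPort: 9376}})

	_, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(
		context.TODO(), ds, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	// Wait until exactly one daemon pod is available on every schedulable node.
	err = wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		return e2edaemonset.CheckRunningOnAllNodes(f, ds)
	})
	if err != nil {
		return err
	}

	// Cross-check the DaemonSet status counters.
	return e2edaemonset.CheckDaemonStatus(f, "example-ds")
}

The updated call sites below follow the same pattern: build with NewDaemonSet, create through f.ClientSet, poll CheckRunningOnAllNodes, then assert CheckDaemonStatus.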
@@ -52,6 +52,7 @@ import (
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
 )
@@ -169,7 +170,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
@@ -207,9 +208,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 		framework.ExpectEqual(len(daemonSetLabels), 1)
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
@@ -226,9 +227,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error patching daemon set")
 		daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
 		framework.ExpectEqual(len(daemonSetLabels), 1)
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)
 	})

@@ -270,9 +271,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 		framework.ExpectEqual(len(daemonSetLabels), 1)
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
@@ -297,7 +298,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
@@ -745,7 +746,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		}

 		// Make sure every daemon pod on the node has been updated
-		nodeNames := schedulableNodes(c, ds)
+		nodeNames := e2edaemonset.SchedulableNodes(c, ds)
 		for _, node := range nodeNames {
 			switch {
 			case
@@ -831,7 +832,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("listing all DeamonSets")
@@ -879,7 +880,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = checkDaemonStatus(f, dsName)
+		err = e2edaemonset.CheckDaemonStatus(f, dsName)
 		framework.ExpectNoError(err)

 		ginkgo.By("Getting /status")
@@ -1008,58 +1009,13 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
 }

 func newDaemonSet(dsName, image string, label map[string]string) *appsv1.DaemonSet {
-	return &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: dsName,
-		},
-		Spec: appsv1.DaemonSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: label,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: label,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "app",
-							Image: image,
-							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
-						},
-					},
-				},
-			},
-		},
-	}
+	ds := newDaemonSetWithLabel(dsName, image, label)
+	ds.ObjectMeta.Labels = nil
+	return ds
 }

 func newDaemonSetWithLabel(dsName, image string, label map[string]string) *appsv1.DaemonSet {
-	return &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   dsName,
-			Labels: label,
-		},
-		Spec: appsv1.DaemonSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: label,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: label,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "app",
-							Image: image,
-							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
-						},
-					},
-				},
-			},
-		},
-	}
+	return e2edaemonset.NewDaemonSet(dsName, image, label, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}})
 }

 func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
@@ -1158,66 +1114,12 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 	return newNode, nil
 }

-func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) {
-	return func() (bool, error) {
-		podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
-		if err != nil {
-			framework.Logf("could not get the pod list: %v", err)
-			return false, nil
-		}
-		pods := podList.Items
-
-		nodesToPodCount := make(map[string]int)
-		for _, pod := range pods {
-			if !metav1.IsControlledBy(&pod, ds) {
-				continue
-			}
-			if pod.DeletionTimestamp != nil {
-				continue
-			}
-			if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
-				nodesToPodCount[pod.Spec.NodeName]++
-			}
-		}
-		framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
-
-		// Ensure that exactly 1 pod is running on all nodes in nodeNames.
-		for _, nodeName := range nodeNames {
-			if nodesToPodCount[nodeName] != 1 {
-				framework.Logf("Node %s is running more than one daemon pod", nodeName)
-				return false, nil
-			}
-		}
-
-		framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
-		// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
-		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
-		// other nodes.
-		return len(nodesToPodCount) == len(nodeNames), nil
-	}
-}
-
 func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
 	return func() (bool, error) {
-		nodeNames := schedulableNodes(f.ClientSet, ds)
-		return checkDaemonPodOnNodes(f, ds, nodeNames)()
+		return e2edaemonset.CheckRunningOnAllNodes(f, ds)
 	}
 }

-func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
-	nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-	framework.ExpectNoError(err)
-	nodeNames := make([]string, 0)
-	for _, node := range nodeList.Items {
-		if !canScheduleOnNode(node, ds) {
-			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
-			continue
-		}
-		nodeNames = append(nodeNames, node.Name)
-	}
-	return nodeNames
-}
-
 func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
 	return func() (bool, error) {
 		pods := listDaemonPods(c, ns, label)
@@ -1230,27 +1132,8 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
 	}
 }

-// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
-func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
-	newPod := daemon.NewPod(ds, node.Name)
-	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, node.Spec.Taints)
-	return fitsNodeName && fitsNodeAffinity && fitsTaints
-}
-
 func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
-	return checkDaemonPodOnNodes(f, ds, make([]string, 0))
-}
-
-func checkDaemonStatus(f *framework.Framework, dsName string) error {
-	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
-	if err != nil {
-		return fmt.Errorf("could not get daemon set from v1")
-	}
-	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
-	if desired != scheduled && desired != ready {
-		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
-	}
-	return nil
+	return e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0))
 }

 func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
@@ -1286,7 +1169,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.Daemo
 			return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
 		}
 		// Make sure every daemon pod on the node has been updated
-		nodeNames := schedulableNodes(c, ds)
+		nodeNames := e2edaemonset.SchedulableNodes(c, ds)
 		for _, node := range nodeNames {
 			if nodesToUpdatedPodCount[node] == 0 {
 				return false, nil
test/e2e/framework/daemonset/fixtures.go (new file, 140 lines)
@@ -0,0 +1,140 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package daemonset
+
+import (
+	"context"
+	"fmt"
+
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/controller/daemon"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+func NewDaemonSet(dsName, image string, labels map[string]string, volumes []v1.Volume, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) *appsv1.DaemonSet {
+	return &appsv1.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   dsName,
+			Labels: labels,
+		},
+		Spec: appsv1.DaemonSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: labels,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:            "app",
+							Image:           image,
+							Args:            args,
+							Ports:           ports,
+							VolumeMounts:    mounts,
+							SecurityContext: &v1.SecurityContext{},
+						},
+					},
+					SecurityContext: &v1.PodSecurityContext{},
+					Volumes:         volumes,
+				},
+			},
+		},
+	}
+}
+
+func CheckRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) (bool, error) {
+	nodeNames := SchedulableNodes(f.ClientSet, ds)
+	return CheckDaemonPodOnNodes(f, ds, nodeNames)()
+}
+
+func SchedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
+	nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	framework.ExpectNoError(err)
+	nodeNames := make([]string, 0)
+	for _, node := range nodeList.Items {
+		if !canScheduleOnNode(node, ds) {
+			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
+			continue
+		}
+		nodeNames = append(nodeNames, node.Name)
+	}
+	return nodeNames
+}
+
+// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
+func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
+	newPod := daemon.NewPod(ds, node.Name)
+	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, node.Spec.Taints)
+	return fitsNodeName && fitsNodeAffinity && fitsTaints
+}
+
+func CheckDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) {
+	return func() (bool, error) {
+		podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			framework.Logf("could not get the pod list: %v", err)
+			return false, nil
+		}
+		pods := podList.Items
+
+		nodesToPodCount := make(map[string]int)
+		for _, pod := range pods {
+			if !metav1.IsControlledBy(&pod, ds) {
+				continue
+			}
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
+				nodesToPodCount[pod.Spec.NodeName]++
+			}
+		}
+		framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
+
+		// Ensure that exactly 1 pod is running on all nodes in nodeNames.
+		for _, nodeName := range nodeNames {
+			if nodesToPodCount[nodeName] != 1 {
+				framework.Logf("Node %s is running %d daemon pod, expected 1", nodeName, nodesToPodCount[nodeName])
+				return false, nil
+			}
+		}
+
+		framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
+		// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
+		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
+		// other nodes.
+		return len(nodesToPodCount) == len(nodeNames), nil
+	}
+}
+
+func CheckDaemonStatus(f *framework.Framework, dsName string) error {
+	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
+	if desired != scheduled && desired != ready {
+		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+	}
+	return nil
+}
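A usage note on the new file above (again not part of the commit): CheckDaemonPodOnNodes returns a closure that plugs straight into wait.PollImmediate, which is how the node-selector tests in this commit wait for daemon pods on a chosen node, and an empty node list asserts the DaemonSet runs nowhere. The wrapper functions and retry values below are illustrative assumptions; only the e2edaemonset calls come from the commit.

// Sketch only: thin wrappers over the commit's e2edaemonset helpers.
// The function names, retry period, and timeout are assumptions.
package example

import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
)

// waitForDaemonPodOnNode blocks until exactly one daemon pod of ds is
// available on targetNode, mirroring the node-selector tests in this commit.
func waitForDaemonPodOnNode(f *framework.Framework, ds *appsv1.DaemonSet, targetNode string) {
	err := wait.PollImmediate(2*time.Second, 5*time.Minute,
		e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{targetNode}))
	framework.ExpectNoError(err, "error waiting for a daemon pod on the selected node")
}

// waitForDaemonPodsGone passes an empty node list, which asserts that no node
// runs a daemon pod; the updated checkRunningOnNoNodes helper does the same.
func waitForDaemonPodsGone(f *framework.Framework, ds *appsv1.DaemonSet) {
	err := wait.PollImmediate(2*time.Second, 5*time.Minute,
		e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0)))
	framework.ExpectNoError(err, "error waiting for daemon pods to be unscheduled")
}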
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/network/common"
@@ -133,34 +134,8 @@ func iperf2ServerService(client clientset.Interface, namespace string) (*v1.Serv
 func iperf2ClientDaemonSet(client clientset.Interface, namespace string) (*appsv1.DaemonSet, error) {
 	one := int64(1)
 	labels := map[string]string{labelKey: clientLabelValue}
-	spec := &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   "iperf2-clients",
-			Labels: labels,
-		},
-		Spec: appsv1.DaemonSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: labels,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labels,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:    "iperf2-client",
-							Image:   imageutils.GetE2EImage(imageutils.Agnhost),
-							Command: []string{"/agnhost"},
-							Args:    []string{"pause"},
-						},
-					},
-					TerminationGracePeriodSeconds: &one,
-				},
-			},
-		},
-		Status: appsv1.DaemonSetStatus{},
-	}
+	spec := e2edaemonset.NewDaemonSet("iperf2-clients", imageutils.GetE2EImage(imageutils.Agnhost), labels, nil, nil, nil)
+	spec.Spec.Template.Spec.TerminationGracePeriodSeconds = &one

 	ds, err := client.AppsV1().DaemonSets(namespace).Create(context.TODO(), spec, metav1.CreateOptions{})
 	if err != nil {
@@ -23,10 +23,9 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 )

@@ -47,35 +46,9 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {

 	ns := f.Namespace

-	t.daemonSet = &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: ns.Name,
-			Name:      daemonSetName,
-		},
-		Spec: appsv1.DaemonSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: labelSet,
-			},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labelSet,
-				},
-				Spec: v1.PodSpec{
-					Tolerations: []v1.Toleration{
-						{Operator: v1.TolerationOpExists},
-					},
-					Containers: []v1.Container{
-						{
-							Name:            daemonSetName,
-							Image:           image,
-							Args:            []string{"serve-hostname"},
-							Ports:           []v1.ContainerPort{{ContainerPort: 9376}},
-							SecurityContext: &v1.SecurityContext{},
-						},
-					},
-				},
-			},
-		},
+	t.daemonSet = e2edaemonset.NewDaemonSet(daemonSetName, image, labelSet, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}}, "serve-hostname")
+	t.daemonSet.Spec.Template.Spec.Tolerations = []v1.Toleration{
+		{Operator: v1.TolerationOpExists},
 	}

 	ginkgo.By("Creating a DaemonSet")
@@ -86,7 +59,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {

 	ginkgo.By("Waiting for DaemonSet pods to become ready")
 	err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
-		return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
+		return e2edaemonset.CheckRunningOnAllNodes(f, t.daemonSet)
 	})
 	framework.ExpectNoError(err)

@@ -111,7 +84,7 @@ func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {

 func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
 	ginkgo.By("confirming the DaemonSet pods are running on all expected nodes")
-	res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
+	res, err := e2edaemonset.CheckRunningOnAllNodes(f, t.daemonSet)
 	framework.ExpectNoError(err)
 	if !res {
 		framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
@@ -119,72 +92,6 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)

 	// DaemonSet resource itself should be good
 	ginkgo.By("confirming the DaemonSet resource is in a good state")
-	res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
+	err = e2edaemonset.CheckDaemonStatus(f, t.daemonSet.Name)
 	framework.ExpectNoError(err)
-	if !res {
-		framework.Failf("expected DaemonSet to be in a good state, it was not")
-	}
 }
-
-func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
-	nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-	if err != nil {
-		return false, err
-	}
-
-	nodeNames := make([]string, 0)
-	for _, node := range nodeList.Items {
-		if len(node.Spec.Taints) != 0 {
-			framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
-		}
-		// DaemonSet Pods are expected to run on all the nodes in e2e.
-		nodeNames = append(nodeNames, node.Name)
-	}
-
-	return checkDaemonPodOnNodes(f, namespace, selector, nodeNames)
-}
-
-func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
-	selector := labels.Set(labelSet).AsSelector()
-	options := metav1.ListOptions{LabelSelector: selector.String()}
-	podList, err := f.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), options)
-	if err != nil {
-		return false, err
-	}
-	pods := podList.Items
-
-	nodesToPodCount := make(map[string]int)
-	for _, pod := range pods {
-		if controller.IsPodActive(&pod) {
-			framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
-			nodesToPodCount[pod.Spec.NodeName]++
-		}
-	}
-	framework.Logf("nodesToPodCount: %v", nodesToPodCount)
-
-	// Ensure that exactly 1 pod is running on all nodes in nodeNames.
-	for _, nodeName := range nodeNames {
-		if nodesToPodCount[nodeName] != 1 {
-			return false, nil
-		}
-	}
-
-	// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
-	// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
-	// other nodes.
-	return len(nodesToPodCount) == len(nodeNames), nil
-}
-
-func checkDaemonStatus(f *framework.Framework, namespace string, dsName string) (bool, error) {
-	ds, err := f.ClientSet.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{})
-	if err != nil {
-		return false, err
-	}
-
-	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
-	if desired != scheduled && desired != ready {
-		return false, nil
-	}
-
-	return true, nil
-}
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/upgrades"

@@ -68,7 +69,7 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 	framework.ExpectNoError(err)

 	ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
-	err = waitForKubeProxyDaemonSetRunning(c)
+	err = waitForKubeProxyDaemonSetRunning(f, c)
 	framework.ExpectNoError(err)
 }

@@ -86,7 +87,7 @@ func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-d
 // Setup verifies kube-proxy DaemonSet is running before upgrade.
 func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
-	err := waitForKubeProxyDaemonSetRunning(f.ClientSet)
+	err := waitForKubeProxyDaemonSetRunning(f, f.ClientSet)
 	framework.ExpectNoError(err)
 }

@@ -170,7 +171,7 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
 	return nil
 }

-func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
+func waitForKubeProxyDaemonSetRunning(f *framework.Framework, c clientset.Interface) error {
 	framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)

 	condition := func() (bool, error) {
@@ -185,19 +186,7 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
 			return false, nil
 		}

-		nodes, err := e2enode.GetReadySchedulableNodes(c)
-		if err != nil {
-			framework.Logf("Failed to get nodes: %v", err)
-			return false, nil
-		}
-
-		numberSchedulableNodes := len(nodes.Items)
-		numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
-		if numberkubeProxyPods != numberSchedulableNodes {
-			framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
-			return false, nil
-		}
-		return true, nil
+		return e2edaemonset.CheckRunningOnAllNodes(f, &daemonSets.Items[0])
 	}

 	if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
@@ -20,12 +20,12 @@ import (
 	"context"
 	"time"

-	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/daemonset"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -57,60 +57,37 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() {
 		labels := map[string]string{
 			daemonsetNameLabel: dsName,
 		}
-		ds := &appsv1.DaemonSet{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      dsName,
-				Namespace: "kube-system",
-			},
-			Spec: appsv1.DaemonSetSpec{
-				Selector: &metav1.LabelSelector{
-					MatchLabels: labels,
-				},
-				Template: v1.PodTemplateSpec{
-					ObjectMeta: metav1.ObjectMeta{
-						Labels: labels,
-					},
-					Spec: v1.PodSpec{
-						PriorityClassName: "system-node-critical",
-						Tolerations: []v1.Toleration{
-							{
-								Key:      "CriticalAddonsOnly",
-								Operator: "Exists",
-							},
-						},
-						Containers: []v1.Container{
-							{
-								Name:  "hostdev",
-								Image: image,
-								VolumeMounts: []v1.VolumeMount{
-									{
-										Name:      mountName,
-										MountPath: mountPath,
-									},
-								},
-								Env: []v1.EnvVar{
-									{
-										Name:  "DIRECTX_GPU_MATCH_NAME",
-										Value: " ",
-									},
-								},
-							},
-						},
-						Volumes: []v1.Volume{
-							{
-								Name: mountName,
-								VolumeSource: v1.VolumeSource{
-									HostPath: &v1.HostPathVolumeSource{
-										Path: mountPath,
-									},
-								},
-							},
-						},
-						NodeSelector: map[string]string{
-							"kubernetes.io/os": "windows",
-						},
-					},
-				},
-			},
-		}
+		volumes := []v1.Volume{
+			{
+				Name: mountName,
+				VolumeSource: v1.VolumeSource{
+					HostPath: &v1.HostPathVolumeSource{
+						Path: mountPath,
+					},
+				},
+			},
+		}
+		mounts := []v1.VolumeMount{
+			{
+				Name:      mountName,
+				MountPath: mountPath,
+			},
+		}
+		ds := daemonset.NewDaemonSet(dsName, image, labels, volumes, mounts, nil)
+		ds.Spec.Template.Spec.PriorityClassName = "system-node-critical"
+		ds.Spec.Template.Spec.Tolerations = []v1.Toleration{
+			{
+				Key:      "CriticalAddonsOnly",
+				Operator: "Exists",
+			},
+		}
+		ds.Spec.Template.Spec.NodeSelector = map[string]string{
+			"kubernetes.io/os": "windows",
+		}
+		ds.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{
+			{
+				Name:  "DIRECTX_GPU_MATCH_NAME",
+				Value: " ",
+			},
+		}
