Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)

parent e8bc121341
commit 32850dc47d
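This commit rewrites assertions in several test/e2e_node suites (AppArmor, container manager, critical pods, device plugin, eviction, node problem detector, startup probe) from the framework.ExpectEqual / framework.ExpectNotEqual helpers to direct gomega matchers, adding the "github.com/onsi/gomega" import where it was missing. A minimal sketch of the target assertion style, assuming only the standard gomega API; gomega.NewWithT is used here so the example runs as a plain Go test outside a ginkgo suite, whereas the files below call the package-level gomega.Expect:

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestGomegaStyle shows the assertion style used on the added lines below:
// values are wrapped in gomega.Expect and checked with matchers such as
// BeNil, BeTrue, BeZero and Equal, with an optional explanation message.
func TestGomegaStyle(t *testing.T) {
	g := gomega.NewWithT(t) // standalone use outside a ginkgo suite

	var err error
	devID := "Dev-1" // stand-in value for illustration only

	// framework.ExpectEqual(err, nil, "...") becomes:
	g.Expect(err).To(gomega.BeNil(), "unexpected error")

	// framework.ExpectNotEqual(devID, "") becomes:
	g.Expect(devID).To(gomega.Not(gomega.Equal("")))
}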
@@ -27,7 +27,7 @@ import (
 "strconv"
 "strings"
 
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,8 +63,9 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
+
 })
 ginkgo.It("should enforce a permissive profile", func() {
 status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -73,8 +74,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
 })
 })
 } else {
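The AppArmor hunks above inspect the terminated container state: a blocked write is expected to exit non-zero, while the permissive profile case expects exit code zero. A standalone sketch of that check against a core/v1 PodStatus, assuming only the k8s.io/api types and gomega; the hard-coded status stands in for the value runAppArmorTest returns in the real test:

package example

import (
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

func TestTerminatedExitCode(t *testing.T) {
	g := gomega.NewWithT(t)

	// Stand-in for the pod status returned by runAppArmorTest.
	status := v1.PodStatus{
		ContainerStatuses: []v1.ContainerStatus{
			{State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}}},
		},
	}

	state := status.ContainerStatuses[0].State.Terminated
	g.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)

	// Denied profile: the container is expected to fail, so the exit code is non-zero.
	g.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
	// The permissive-profile case would instead assert:
	// g.Expect(state.ExitCode).To(gomega.BeZero())
}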
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 ginkgo.Context("once the node is setup", func() {
 ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
 for _, pid := range runtimePids {
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(pid, -999)
@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 })
 ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
 framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 // created before this test, and may not be infra
 // containers. They should be excluded from the test.
 existingPausePIDs, err := getPidsForProcess("pause", "")
-framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
+gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
 existingPausePIDSet := sets.NewInt(existingPausePIDs...)
 
 podClient := f.PodClient()
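The container-manager hunks keep their gomega.Eventually polling of validateOOMScoreAdjSetting and only swap the error assertions. A sketch of that polling idiom, assuming a Linux /proc filesystem and a gomega release whose NewWithT value exposes Eventually; checkOOMScoreAdj is a hypothetical, simplified stand-in for the helper used in the test:

package example

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// checkOOMScoreAdj reads /proc/<pid>/oom_score_adj and compares it to the
// expected value, returning an error on mismatch (Linux only).
func checkOOMScoreAdj(pid, expected int) error {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return err
	}
	got, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return err
	}
	if got != expected {
		return fmt.Errorf("expected oom_score_adj %d, got %d", expected, got)
	}
	return nil
}

func TestOOMScoreAdjEventually(t *testing.T) {
	g := gomega.NewWithT(t)

	// Poll the test's own process; 0 is the usual default for an unadjusted
	// process, so adjust the expectation if your environment differs.
	pid := os.Getpid()

	// Retry every second for up to 10 seconds, mirroring the polling the
	// e2e test performs with its own timeouts.
	g.Eventually(func() error {
		return checkOOMScoreAdj(pid, 0)
	}, 10*time.Second, time.Second).Should(gomega.Succeed())
}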
@@ -32,6 +32,7 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 "github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 
 const (
@@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 })
 
 _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)
 
 // Create pods, starting with non-critical so that the critical preempts the other pods.
 f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -156,9 +157,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 pod.Spec.PriorityClassName = systemCriticalPriorityName
 pod.Spec.Priority = &value
 
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
+gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
 } else {
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
+gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
 }
 return pod
 }
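The CriticalPod hunk asserts that creating the PriorityClass either succeeded or failed with AlreadyExists, so repeated runs stay idempotent. A sketch of that predicate using the apimachinery errors package; the fabricated AlreadyExists error and the "system-critical" name stand in for the real API response and systemCriticalPriorityName:

package example

import (
	"testing"

	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestCreateToleratesAlreadyExists(t *testing.T) {
	g := gomega.NewWithT(t)

	// Stand-in for the error PriorityClasses().Create(...) returns on a second run.
	err := apierrors.NewAlreadyExists(
		schema.GroupResource{Group: "scheduling.k8s.io", Resource: "priorityclasses"},
		"system-critical",
	)

	// Same predicate the diff wraps in gomega.Expect(...).To(gomega.BeTrue(), ...):
	// creation is acceptable if it succeeded or the object already exists.
	g.Expect(err == nil || apierrors.IsAlreadyExists(err)).To(gomega.BeTrue(),
		"failed to create PriorityClasses with an error: %v", err)
}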
@@ -22,7 +22,7 @@ import (
 
 "regexp"
 
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -97,12 +97,12 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
 deviceIDRE := "stub devices: (Dev-[0-9]+)"
 devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-framework.ExpectNotEqual(devID1, "")
+gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
 
 podResources, err := getNodeDevices()
 var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 framework.Logf("pod resources %v", podResources)
-framework.ExpectEqual(err, nil)
+gomega.Expect(err).To(gomega.BeNil())
 framework.ExpectEqual(len(podResources.PodResources), 2)
 for _, res := range podResources.GetPodResources() {
 if res.Name == pod1.Name {
@@ -110,7 +110,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }
 }
 framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
-framework.ExpectNotEqual(resourcesForOurPod, nil)
+gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
 framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
 framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
 framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Checking that pod got a different fake device")
 devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
 
-framework.ExpectNotEqual(devID1, devID2)
+gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 
 ginkgo.By("By deleting the pods and waiting for container removal")
 err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 if nodes == nil {
 return nil, fmt.Errorf("the node list is nil")
 }
-framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
+gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
 if len(nodes.Items) == 0 {
 return nil, fmt.Errorf("empty node list: %+v", nodes)
 }
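The device-plugin hunks extract a device ID from container logs with the regexp "stub devices: (Dev-[0-9]+)" and assert it is non-empty and differs between pods. A standalone sketch of that extraction, with inlined log text in place of the parseLog helper:

package example

import (
	"regexp"
	"testing"

	"github.com/onsi/gomega"
)

func TestParseStubDeviceID(t *testing.T) {
	g := gomega.NewWithT(t)

	// Same pattern as deviceIDRE in the diff.
	deviceIDRE := regexp.MustCompile(`stub devices: (Dev-[0-9]+)`)

	// Inlined stand-ins for the two pods' container logs.
	log1 := "stub devices: Dev-3"
	log2 := "stub devices: Dev-7"

	match1 := deviceIDRE.FindStringSubmatch(log1)
	match2 := deviceIDRE.FindStringSubmatch(log2)
	g.Expect(match1).To(gomega.HaveLen(2), "expected one capture group in %q", log1)
	g.Expect(match2).To(gomega.HaveLen(2), "expected one capture group in %q", log2)

	devID1, devID2 := match1[1], match2[1]
	g.Expect(devID1).To(gomega.Not(gomega.Equal("")))
	// After the restart scenario the second pod should get a different device.
	g.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
}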
@@ -23,7 +23,7 @@ import (
 "strings"
 "time"
 
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource != noStarvedResource {
 // Check the eviction.StarvedResourceKey
 starved, found := event.Annotations[eviction.StarvedResourceKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
 pod.Name, expectedStarvedResource)
 starvedResource := v1.ResourceName(starved)
 framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource == v1.ResourceMemory {
 // Check the eviction.OffendingContainersKey
 offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
 pod.Name)
 offendingContainers := strings.Split(offendersString, ",")
 framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
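The verifyEvictionEvents hunks read the starved-resource and offending-containers annotations from the eviction event and assert they are present and well-formed. A sketch of those lookups with a literal map standing in for event.Annotations; the key strings are assumptions matching the eviction.StarvedResourceKey and eviction.OffendingContainersKey constants:

package example

import (
	"strings"
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

func TestEvictionEventAnnotations(t *testing.T) {
	g := gomega.NewWithT(t)

	// Stand-in for event.Annotations; key names are assumed here.
	annotations := map[string]string{
		"starved_resource":      "memory",
		"offending_containers":  "memory-hog",
	}

	starved, found := annotations["starved_resource"]
	g.Expect(found).To(gomega.BeTrue(), "expected the starved resource annotation on the eviction event")
	g.Expect(v1.ResourceName(starved)).To(gomega.Equal(v1.ResourceMemory))

	offendersString, found := annotations["offending_containers"]
	g.Expect(found).To(gomega.BeTrue(), "expected the offending containers annotation on the eviction event")
	offendingContainers := strings.Split(offendersString, ",")
	g.Expect(offendingContainers).To(gomega.HaveLen(1), "expected exactly one offending container")
}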
@@ -24,7 +24,7 @@ import (
 "path"
 "time"
 
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
 
 nodeTime = time.Now()
 bootTime, err = util.GetBootTime()
-framework.ExpectEqual(err, nil)
+gomega.Expect(err).To(gomega.BeNil())
 
 // Set lookback duration longer than node up time.
 // Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
@@ -30,6 +30,7 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 "github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 
 const (
@@ -178,7 +179,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea
 
 isReady, err := testutils.PodRunningReady(p)
 framework.ExpectNoError(err)
-framework.ExpectEqual(isReady, true, "pod should be ready")
+gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
 
 // We assume the pod became ready when the container became ready. This
 // is true for a single container pod.