Mirror of https://github.com/k3s-io/kubernetes.git
feat: use framework.ExpectEqual instead of should
commit 0ec95afb76
parent 932487c744
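The change applied throughout the diff below is mechanical: raw gomega assertions of the form gomega.Expect(got).Should(gomega.Equal(want)) become calls to the e2e framework helper framework.ExpectEqual(got, want, optionalExplanation...). For orientation, here is a minimal sketch of such a helper, assuming it simply delegates to gomega with a caller offset; the committed definition in the e2e framework package may differ in detail.

package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the current test when actual != extra, passing any
// explain arguments through as the failure description.
// Sketch only: signature and offset are assumptions, not the committed code.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	// Offset 1 makes gomega report the failure at the caller's line
	// rather than inside this helper.
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

With a helper like that in place, a call site such as gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1)) collapses to framework.ExpectEqual(len(allOldRSs), 1), as in the first hunk below; files whose only remaining gomega use was the assertion also drop the gomega import and BUILD dependency.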
@@ -297,7 +297,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 framework.ExpectNoError(err)
 _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
 framework.ExpectNoError(err)
-gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(allOldRSs), 1)
 }

 func testRecreateDeployment(f *framework.Framework) {
@@ -497,8 +497,8 @@ func testRolloverDeployment(f *framework.Framework) {
 }

 func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
-gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas))
-gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
+framework.ExpectEqual(*rs.Spec.Replicas, replicas)
+framework.ExpectEqual(rs.Status.Replicas, replicas)
 }

 func randomScale(d *appsv1.Deployment, i int) {
@@ -652,7 +652,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {

 e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
 rsList := listDeploymentReplicaSets(c, ns, podLabels)
-gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(rsList.Items), 1)

 e2elog.Logf("Obtaining the ReplicaSet's UID")
 orphanedRSUID := rsList.Items[0].UID
@@ -683,10 +683,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {

 e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
 rsList = listDeploymentReplicaSets(c, ns, podLabels)
-gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(rsList.Items), 1)

 e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
-gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID))
+framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID)
 }

 // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
@@ -769,7 +769,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {

 // Second rollout's replicaset should have 0 available replicas.
 e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
-gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0)))
+framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0))

 // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
 newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
@@ -74,17 +74,17 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 })
 ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
 _, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })

 ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() {
 _, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })

 ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
 _, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })

 ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() {
@@ -101,7 +101,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
 framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
 _, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })

 ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() {
@@ -120,7 +120,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {

 ginkgo.By("Node should not get the secret")
 _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)

 ginkgo.By("Create a pod that use the secret")
 pod := &v1.Pod{
@@ -175,12 +175,12 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 }
 ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
 _, err := c.CoreV1().Nodes().Create(node)
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })

 ginkgo.It("A node shouldn't be able to delete another node", func() {
 ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
 err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
-gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
+framework.ExpectEqual(apierrors.IsForbidden(err), true)
 })
 })
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 coresPerNode = int((&cpu).MilliValue() / 1000)
 memCapacityMb = int((&mem).Value() / 1024 / 1024)

-gomega.Expect(nodeCount).Should(gomega.Equal(sum))
+framework.ExpectEqual(nodeCount, sum)

 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(3)
@@ -326,7 +326,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 ginkgo.By("Checking if the number of nodes is as expected")
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
-gomega.Expect(len(nodes.Items)).Should(gomega.Equal(totalNodes))
+framework.ExpectEqual(len(nodes.Items), totalNodes)
 })

 ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
@@ -123,7 +123,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 memAllocatableMb = int((&mem).Value() / 1024 / 1024)

-gomega.Expect(nodeCount).Should(gomega.Equal(sum))
+framework.ExpectEqual(nodeCount, sum)

 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(5)
@@ -186,7 +186,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 }
 }
-gomega.Expect(eventFound).Should(gomega.Equal(true))
+framework.ExpectEqual(eventFound, true)
 // Verify that cluster size is not changed
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size <= nodeCount }, time.Second))
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)

 ginkgo.By("Schedule a pod which requires GPU")
 framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
@@ -231,7 +231,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
 })

 ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -254,14 +254,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
 defer disableAutoscaler(gpuPoolName, 0, 2)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)

 ginkgo.By("Scale GPU deployment")
 framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(2))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2)
 })

 ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -280,7 +280,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)

 ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
@@ -290,7 +290,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

 // Expect gpu pool to stay intact
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 })

 ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
@@ -313,14 +313,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)

 ginkgo.By("Remove the only POD requiring GPU")
 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount }, scaleDownTimeout))
-gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
+framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
 })

 ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
@@ -360,10 +360,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 if status.target != target {
 klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 }
-gomega.Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(gomega.Equal(false))
-gomega.Expect(status.status).Should(gomega.Equal(caNoScaleUpStatus))
-gomega.Expect(status.ready).Should(gomega.Equal(status.target))
-gomega.Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(gomega.Equal(status.target + unmanagedNodes))
+framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false)
+framework.ExpectEqual(status.status, caNoScaleUpStatus)
+framework.ExpectEqual(status.ready, status.target)
+framework.ExpectEqual(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items), status.target+unmanagedNodes)
 })

 ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -382,7 +382,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
 nodes := getPoolNodes(f, extraPoolName)
-gomega.Expect(len(nodes)).Should(gomega.Equal(extraNodes))
+framework.ExpectEqual(len(nodes), extraNodes)
 extraMemMb := 0
 for _, node := range nodes {
 mem := node.Status.Allocatable[v1.ResourceMemory]
@@ -723,7 +723,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By("No nodes should be removed")
 time.Sleep(scaleDownTimeout)
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodes.Items)).Should(gomega.Equal(increasedSize))
+framework.ExpectEqual(len(nodes.Items), increasedSize)
 })
 })

@@ -831,7 +831,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 // GKE-specific check
 newSize := getPoolSize(f, extraPoolName)
-gomega.Expect(newSize).Should(gomega.Equal(0))
+framework.ExpectEqual(newSize, 0)
 }

 gceScaleToZero := func() {
@@ -862,7 +862,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // non-GKE only
 newSize, err := framework.GroupSize(minMig)
 framework.ExpectNoError(err)
-gomega.Expect(newSize).Should(gomega.Equal(0))
+framework.ExpectEqual(newSize, 0)
 }

 ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
@@ -918,11 +918,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 time.Sleep(scaleUpTimeout)
 currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
-gomega.Expect(len(currentNodes.Items)).Should(gomega.Equal(len(nodes.Items) - nodesToBreakCount))
+framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
 status, err := getClusterwideStatus(c)
 e2elog.Logf("Clusterwide status: %v", status)
 framework.ExpectNoError(err)
-gomega.Expect(status).Should(gomega.Equal("Unhealthy"))
+framework.ExpectEqual(status, "Unhealthy")
 }
 }
 testFunction()
@@ -1247,7 +1247,7 @@ func getPoolInitialSize(poolName string) int {
 klog.Infof("Node-pool initial size: %s", output)
 framework.ExpectNoError(err, string(output))
 fields := strings.Fields(string(output))
-gomega.Expect(len(fields)).Should(gomega.Equal(1))
+framework.ExpectEqual(len(fields), 1)
 size, err := strconv.ParseInt(fields[0], 10, 64)
 framework.ExpectNoError(err)

@@ -112,7 +112,9 @@ while true; do sleep 1; done
 gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))

 ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
-gomega.Expect(terminateContainer.IsReady()).Should(gomega.Equal(testCase.Ready))
+isReady, err := terminateContainer.IsReady()
+framework.ExpectEqual(isReady, testCase.Ready)
+framework.ExpectNoError(err)

 status, err := terminateContainer.GetStatus()
 framework.ExpectNoError(err)
@@ -24,7 +24,6 @@ go_library(
 "//test/e2e/framework/log:go_default_library",
 "//test/e2e/lifecycle:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )

@@ -18,7 +18,6 @@ package bootstrap

 import (
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"

 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -78,7 +77,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
 cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 framework.ExpectNoError(err)
 signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId]
-gomega.Expect(ok).Should(gomega.Equal(true))
+framework.ExpectEqual(ok, true)

 ginkgo.By("update the cluster-info ConfigMap")
 originalData := cfgMap.Data[bootstrapapi.KubeConfigKey]
@@ -132,7 +132,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 ginkgo.By("deleting the pod gracefully")
 rsp, err := client.Do(req)
 framework.ExpectNoError(err, "failed to use http client to send delete")
-gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
+framework.ExpectEqual(rsp.StatusCode, http.StatusOK, "failed to delete gracefully by client request")
 var lastPod v1.Pod
 err = json.NewDecoder(rsp.Body).Decode(&lastPod)
 framework.ExpectNoError(err, "failed to decode graceful termination proxy response")