mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 07:47:56 +00:00)

e2e_upgrades: stop using deprecated framework.ExpectEqual

parent fa88c0b779
commit 209f23ecd7
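Every hunk below follows the same mechanical pattern: framework.ExpectEqual, deprecated in the e2e test framework, is replaced by a direct gomega assertion using whichever matcher fits the data (HaveLen for collections, HaveKeyWithValue for maps, Equal or BeZero for scalar fields). A minimal standalone sketch of the pattern, not taken from the commit itself; it uses gomega's NewGomega constructor so the assertion runs outside a Ginkgo suite, and the users slice is made up:

package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

func main() {
	// Route assertion failures to a plain callback instead of a Ginkgo fail handler.
	g := gomega.NewGomega(func(message string, _ ...int) {
		fmt.Println("assertion failed:", message)
	})

	users := []string{"alice", "bob"} // hypothetical test data

	// Deprecated style removed by this commit:
	//   framework.ExpectEqual(len(users), 2)
	// Replacement style added by this commit:
	g.Expect(users).To(gomega.HaveLen(2))
}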
@@ -116,7 +116,7 @@ func (t *CassandraUpgradeTest) Setup(ctx context.Context, f *framework.Framework
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(len(users), 2)
+	gomega.Expect(users).To(gomega.HaveLen(2))
 }
 
 // listUsers gets a list of users from the db via the tester service.
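HaveLen, rather than Equal applied to len(users), is the idiomatic gomega spelling here: on failure the matcher prints the collection itself, so the log shows which users actually exist instead of a bare length mismatch. A small go-test sketch of the same assertion, with hypothetical data:

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestUsersExist(t *testing.T) {
	g := gomega.NewWithT(t) // binds gomega failures to *testing.T
	users := []string{"alice", "bob"}
	g.Expect(users).To(gomega.HaveLen(2))
}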
@@ -32,6 +32,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -79,7 +81,7 @@ func (t *DeploymentUpgradeTest) Setup(ctx context.Context, f *framework.Framewor
 	rsList, err := rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()})
 	framework.ExpectNoError(err)
 	rss := rsList.Items
-	framework.ExpectEqual(len(rss), 1, "expected one replicaset, got %d", len(rss))
+	gomega.Expect(rss).To(gomega.HaveLen(1), "expected one replicaset, got %d", len(rss))
 	t.oldRSUID = rss[0].UID
 
 	ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
@@ -99,7 +101,7 @@ func (t *DeploymentUpgradeTest) Setup(ctx context.Context, f *framework.Framewor
 	rsList, err = rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()})
 	framework.ExpectNoError(err)
 	rss = rsList.Items
-	framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss))
+	gomega.Expect(rss).To(gomega.HaveLen(2), "expected 2 replicaset, got %d", len(rss))
 
 	ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
 	switch t.oldRSUID {
@@ -132,7 +134,7 @@ func (t *DeploymentUpgradeTest) Test(ctx context.Context, f *framework.Framework
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
-	framework.ExpectEqual(deployment.UID, t.oldDeploymentUID)
+	gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
 
 	ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
@@ -140,19 +142,19 @@ func (t *DeploymentUpgradeTest) Test(ctx context.Context, f *framework.Framework
 	rsList, err := rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()})
 	framework.ExpectNoError(err)
 	rss := rsList.Items
-	framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss))
+	gomega.Expect(rss).To(gomega.HaveLen(2), "expected 2 replicaset, got %d", len(rss))
 
 	switch t.oldRSUID {
 	case rss[0].UID:
-		framework.ExpectEqual(rss[1].UID, t.newRSUID)
+		gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
 	case rss[1].UID:
-		framework.ExpectEqual(rss[0].UID, t.newRSUID)
+		gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
 	default:
 		framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
 	}
 
 	ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
-	framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2")
+	gomega.Expect(deployment.Annotations).To(gomega.HaveKeyWithValue(deploymentutil.RevisionAnnotation, "2"))
 
 	ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
 	framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
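The revision check above also changes shape: instead of comparing one looked-up value, HaveKeyWithValue asserts key presence and value in a single step, and on failure dumps the whole annotations map, where Equal on deployment.Annotations[...] would only report an empty-string mismatch. A sketch, under the assumption that deploymentutil.RevisionAnnotation is the usual "deployment.kubernetes.io/revision" key:

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestRevisionAnnotation(t *testing.T) {
	g := gomega.NewWithT(t)
	annotations := map[string]string{ // hypothetical deployment annotations
		"deployment.kubernetes.io/revision": "2",
	}
	g.Expect(annotations).To(gomega.HaveKeyWithValue("deployment.kubernetes.io/revision", "2"))
}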
@@ -111,7 +111,7 @@ func (t *EtcdUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(len(users), 2)
+	gomega.Expect(users).To(gomega.HaveLen(2))
 }
 
 func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@@ -125,7 +125,7 @@ func (t *MySQLUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	ginkgo.By("Verifying that the 2 names have been inserted")
 	count, err := t.countNames()
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(count, 2)
+	gomega.Expect(count).To(gomega.Equal(2))
 }
 
 // Test continually polls the db using the read and write connections, inserting data, and checking
@@ -92,7 +92,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(ctx context.Context, f *framework
 	ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
 	pod, err := e2epod.NewPodClient(f).Get(ctx, t.pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Should be able to get pod")
-	framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running")
+	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "Pod should stay running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
 }
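The two gomega lines kept as context in this hunk show matchers that ExpectEqual never covered: NotTo(BeNil()) guards the State.Running pointer, and BeZero() matches the zero value of any type, so RestartCount (an int32) needs no typed literal. A sketch with a hand-built status:

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

func TestContainerStillUp(t *testing.T) {
	g := gomega.NewWithT(t)
	status := v1.ContainerStatus{ // hypothetical running container
		State:        v1.ContainerState{Running: &v1.ContainerStateRunning{}},
		RestartCount: 0,
	}
	g.Expect(status.State.Running).NotTo(gomega.BeNil(), "Container should be running")
	g.Expect(status.RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
}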
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -56,7 +57,7 @@ func (t *NvidiaGPUUpgradeTest) Test(ctx context.Context, f *framework.Framework,
 		// MasterUpgrade should be totally hitless.
 		job, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, "cuda-add")
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(job.Status.Failed, 0, "Job pods failed during master upgrade: %v", job.Status.Failed)
+		gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
 	}
 }
 
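BeZero matters in this hunk because gomega.Equal is built on reflect.DeepEqual and is therefore type-strict: job.Status.Failed is an int32, while the untyped constant 0 becomes an int, so Equal(0) would fail even when no pods have failed. A sketch of the pitfall:

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestTypedEquality(t *testing.T) {
	g := gomega.NewWithT(t)
	var failed int32 // stands in for job.Status.Failed

	// g.Expect(failed).To(gomega.Equal(0))     // fails: int32 vs int
	g.Expect(failed).To(gomega.Equal(int32(0))) // passes: exact type match
	g.Expect(failed).To(gomega.BeZero())        // passes: any zero value
}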
@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -59,7 +60,7 @@ func (t *SysctlUpgradeTest) Test(ctx context.Context, f *framework.Framework, do
 		ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
 		pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(ctx, t.validPod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
+		gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
 	}
 
 	ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
@@ -105,7 +106,7 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(ctx context.Context,
 	ginkgo.By("Making sure the invalid pod failed")
 	ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, invalidPod)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(ev.Reason, sysctl.ForbiddenReason)
+	gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
 
 	return invalidPod
 }
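Several hunks above carry a format string and arguments after the matcher; gomega accepts these as an optional description printed with the failure, directly mirroring the extra arguments ExpectEqual took. A final sketch, with hypothetical replicaset names:

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestFailureDescription(t *testing.T) {
	g := gomega.NewWithT(t)
	rss := []string{"rs-1", "rs-2"}
	g.Expect(rss).To(gomega.HaveLen(2), "expected 2 replicaset, got %d", len(rss))
}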