Mirror of https://github.com/k3s-io/kubernetes.git
feat: use framework.ExpectEqual in upgrades and windows e2e test
commit ff09cc5405
parent a33840e023
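
Every hunk in this commit follows the same mechanical pattern: an assertion written as gomega.Expect(actual).To(gomega.Equal(expected), explain...) becomes framework.ExpectEqual(actual, expected, explain...), and files whose only remaining gomega use was the Equal matcher also drop the direct gomega import together with the matching BUILD dependency. As a rough sketch of the helper these call sites rely on, assuming it is nothing more than a thin wrapper over gomega (the helper's real source file and its caller-offset handling are not part of this diff):

package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the running test when actual is not equal to extra,
// forwarding any extra explanation arguments to gomega. The offset of 1 is
// an assumption; it makes the reported failure point at the caller instead
// of at this helper.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

With such a helper in place, a call site like the AppArmor check below reads framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running") instead of spelling out the matcher chain.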
@@ -87,7 +87,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
 	ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
 	pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Should be able to get pod")
-	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "Pod should stay running")
+	framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
 }
@@ -35,7 +35,6 @@ go_library(
         "//test/e2e/upgrades:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
     ],
 )
 
@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -130,7 +129,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
-	gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
+	framework.ExpectEqual(deployment.UID, t.oldDeploymentUID)
 
 	ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
@@ -144,15 +143,15 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 
 	switch t.oldRSUID {
 	case rss[0].UID:
-		gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
+		framework.ExpectEqual(rss[1].UID, t.newRSUID)
 	case rss[1].UID:
-		gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
+		framework.ExpectEqual(rss[0].UID, t.newRSUID)
 	default:
 		framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
 	}
 
 	ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
-	gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
+	framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2")
 
 	ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
 	framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
@@ -108,7 +108,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	gomega.Expect(len(users)).To(gomega.Equal(2))
+	framework.ExpectEqual(len(users), 2)
 }
 
 // listUsers gets a list of users from the db via the tester service.
@@ -103,7 +103,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	gomega.Expect(len(users)).To(gomega.Equal(2))
+	framework.ExpectEqual(len(users), 2)
 }
 
 func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@@ -117,7 +117,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the 2 names have been inserted")
 	count, err := t.countNames()
 	framework.ExpectNoError(err)
-	gomega.Expect(count).To(gomega.Equal(2))
+	framework.ExpectEqual(count, 2)
 }
 
 // Test continually polls the db using the read and write connections, inserting data, and checking
@@ -56,7 +56,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
 		ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
 		pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
+		framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
 	}
 
 	ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
@@ -108,7 +108,7 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framewor
 	if ev != nil && ev.Reason == sysctl.UnsupportedReason {
 		framework.Skipf("No sysctl support in Docker <1.12")
 	}
-	gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
+	framework.ExpectEqual(ev.Reason, sysctl.ForbiddenReason)
 
 	return invalidPod
 }
@@ -132,7 +132,7 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
 
 	for name, create := range createTimes {
 		watch, ok := watchTimes[name]
-		gomega.Expect(ok).To(gomega.Equal(true))
+		framework.ExpectEqual(ok, true)
 
 		e2eLags = append(e2eLags,
 			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
@@ -202,12 +202,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1.Pod)
-				gomega.Expect(ok).To(gomega.Equal(true))
+				framework.ExpectEqual(ok, true)
 				go checkPodRunning(p)
 			},
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				p, ok := newObj.(*v1.Pod)
-				gomega.Expect(ok).To(gomega.Equal(true))
+				framework.ExpectEqual(ok, true)
 				go checkPodRunning(p)
 			},
 		},
|
@ -97,7 +97,7 @@ func checkNodeAllocatableTest(f *framework.Framework) {
|
|||||||
ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))
|
ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))
|
||||||
|
|
||||||
// sanity check against stated allocatable
|
// sanity check against stated allocatable
|
||||||
gomega.Expect(calculatedNodeAlloc.Cmp(nodeMem.allocatable)).To(gomega.Equal(0))
|
framework.ExpectEqual(calculatedNodeAlloc.Cmp(nodeMem.allocatable), 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable
|
// Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable
|
||||||
@@ -277,7 +277,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName str
 		output := string(buf[:n])
 		proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 		match := proxyRegexp.FindStringSubmatch(output)
-		gomega.Expect(len(match)).To(gomega.Equal(2))
+		framework.ExpectEqual(len(match), 2)
 		port, err := strconv.Atoi(match[1])
 		framework.ExpectNoError(err)
 		ginkgo.By("http requesting node kubelet /configz")
@@ -26,7 +26,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 const (
@@ -95,7 +94,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s
 
 	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
 
-	gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
+	framework.ExpectEqual(stderr, "Access is denied.")
 
 }
 
@@ -128,12 +127,12 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol
 	framework.ExpectNoError(errRW, msg)
 
 	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
-	gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
+	framework.ExpectEqual(stderr, "Access is denied.")
 
 	readcmd := []string{"cmd", "/c", "type", filePath}
 	readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...)
 	readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
-	gomega.Expect(readout).To(gomega.Equal("windows-volume-test"))
+	framework.ExpectEqual(readout, "windows-volume-test")
 	framework.ExpectNoError(err, readmsg)
 }
 