e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change, otherwise the code would not return immediately when the test gets aborted.
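The change is mechanical but wide-reaching: every spec body takes the context.Context that Ginkgo v2 passes in, and that ctx is threaded through helper functions, client-go calls, and gomega.Eventually/Consistently polls in place of context.TODO(). A minimal sketch of the before/after shape (hypothetical spec, not taken from this diff; f, pod, and the usual e2e framework helpers are assumed to be in scope):

// Before: context.TODO() is never cancelled, so an aborted spec can hang in the
// API call or keep polling in the background.
ginkgo.It("creates a pod and waits for it", func() {
	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	gomega.Eventually(func() bool { return podIsRunning() }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
})

// After: Ginkgo supplies a per-spec context; aborting the run cancels it, so the
// API call returns promptly and the poll stops. podIsRunning is a placeholder
// standing in for whatever condition the spec waits on.
ginkgo.It("creates a pod and waits for it", func(ctx context.Context) {
	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	gomega.Eventually(ctx, func() bool { return podIsRunning() }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
})

Helper functions follow the same rule: they gain a leading ctx context.Context parameter and pass it on instead of calling context.TODO() themselves, which is why most hunks below only add ctx as a new first argument.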
@@ -45,17 +45,17 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
ginkgo.It("should not be exceeded after waiting 2 minutes", func(ctx context.Context) {
ginkgo.By("Creating one pod with limit set to '0.5'")
podsDecimal := newCPUBurnPods(1, powershellImage, "0.5", "1Gi")
-e2epod.NewPodClient(f).CreateBatch(podsDecimal)
+e2epod.NewPodClient(f).CreateBatch(ctx, podsDecimal)
ginkgo.By("Creating one pod with limit set to '500m'")
podsMilli := newCPUBurnPods(1, powershellImage, "500m", "1Gi")
-e2epod.NewPodClient(f).CreateBatch(podsMilli)
+e2epod.NewPodClient(f).CreateBatch(ctx, podsMilli)
ginkgo.By("Waiting 2 minutes")
time.Sleep(2 * time.Minute)
ginkgo.By("Ensuring pods are still running")
var allPods [](*v1.Pod)
for _, p := range podsDecimal {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
-context.TODO(),
+ctx,
p.Name,
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
@@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
}
for _, p := range podsMilli {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
-context.TODO(),
+ctx,
p.Name,
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
@@ -74,7 +74,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
ginkgo.By("Ensuring cpu doesn't exceed limit by >5%")
for _, p := range allPods {
ginkgo.By("Gathering node summary stats")
-nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, p.Spec.NodeName)
+nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, p.Spec.NodeName)
framework.ExpectNoError(err, "Error grabbing node summary stats")
found := false
cpuUsage := float64(0)
@@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() {
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
ginkgo.It(desc, func(ctx context.Context) {
itArg.createMethod = "batch"
-runDensityBatchTest(f, itArg)
+runDensityBatchTest(ctx, f, itArg)
})
}
})
@@ -89,7 +89,7 @@ type densityTest struct {
}
// runDensityBatchTest runs the density batch pod creation test
-func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Duration, []e2emetrics.PodLatencyData) {
+func runDensityBatchTest(ctx context.Context, f *framework.Framework, testArg densityTest) (time.Duration, []e2emetrics.PodLatencyData) {
const (
podType = "density_test_pod"
)
@@ -103,17 +103,17 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
pods := newDensityTestPods(testArg.podsNr, false, imageutils.GetPauseImageName(), podType)
// the controller watches the change of pod status
-controller := newInformerWatchPod(f, mutex, watchTimes, podType)
+controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType)
go controller.Run(stopCh)
defer close(stopCh)
ginkgo.By("Creating a batch of pods")
// It returns a map['pod name']'creation time' containing the creation timestamps
-createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
+createTimes := createBatchPodWithRateControl(ctx, f, pods, testArg.interval)
ginkgo.By("Waiting for all Pods to be observed by the watch...")
-gomega.Eventually(func() bool {
+gomega.Eventually(ctx, func() bool {
return len(watchTimes) == testArg.podsNr
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
@@ -154,25 +154,25 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
sort.Sort(e2emetrics.LatencySlice(e2eLags))
batchLag := lastRunning.Time.Sub(firstCreate.Time)
-deletePodsSync(f, pods)
+deletePodsSync(ctx, f, pods)
return batchLag, e2eLags
}
// createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
// between creations there is an interval for throughput control
-func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
+func createBatchPodWithRateControl(ctx context.Context, f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
createTimes := make(map[string]metav1.Time)
for _, pod := range pods {
createTimes[pod.ObjectMeta.Name] = metav1.Now()
-go e2epod.NewPodClient(f).Create(pod)
+go e2epod.NewPodClient(f).Create(ctx, pod)
time.Sleep(interval)
}
return createTimes
}
// newInformerWatchPod creates an informer to check whether all pods are running.
-func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
+func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
ns := f.Namespace.Name
checkPodRunning := func(p *v1.Pod) {
mutex.Lock()
@@ -190,12 +190,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
-obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options)
+obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
-return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options)
+return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)
},
},
&v1.Pod{},
@@ -265,7 +265,7 @@ func newDensityTestPods(numPods int, volume bool, imageName, podType string) []*
}
// deletePodsSync deletes a list of pods and block until pods disappear.
-func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
+func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod) {
var wg sync.WaitGroup
for _, pod := range pods {
wg.Add(1)
@@ -273,10 +273,10 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
-err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
+err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
-err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
+err = e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)
framework.ExpectNoError(err)
}(pod)
@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() {
|
||||
}
|
||||
|
||||
sysNs := "kube-system"
|
||||
_, err := cs.AppsV1().DaemonSets(sysNs).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
_, err := cs.AppsV1().DaemonSets(sysNs).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating Windows testing Pod")
|
||||
@@ -104,10 +104,10 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() {
|
||||
windowsPod.Spec.Containers[0].Resources.Limits = v1.ResourceList{
|
||||
"microsoft.com/directx": resource.MustParse("1"),
|
||||
}
|
||||
windowsPod, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), windowsPod, metav1.CreateOptions{})
|
||||
windowsPod, err = cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, windowsPod, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
ginkgo.By("Waiting for the pod Running")
|
||||
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, windowsPod.Name, f.Namespace.Name, testSlowMultiplier*framework.PodStartTimeout)
|
||||
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, windowsPod.Name, f.Namespace.Name, testSlowMultiplier*framework.PodStartTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("verifying device access in Windows testing Pod")
|
||||
|
@@ -42,7 +42,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
ginkgo.By("Getting the IP address of the internal Kubernetes service")
-svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{})
+svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(ctx, "kube-dns", metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Preparing a test DNS service with injected DNS names...")
@@ -60,7 +60,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
testPod.Spec.NodeSelector = map[string]string{
"kubernetes.io/os": "windows",
}
-testPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testPod, metav1.CreateOptions{})
+testPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, testPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("confirming that the pod has a windows label")
@@ -68,11 +68,11 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
framework.Logf("Created pod %v", testPod)
defer func() {
framework.Logf("Deleting pod %s...", testPod.Name)
-if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
+if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, testPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testPod.Name, err)
}
}()
-framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testPod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", testPod.Name)
+framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, testPod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", testPod.Name)
// This isn't the best 'test' but it is a great diagnostic, see later test for the 'real' test.
ginkgo.By("Calling ipconfig to get debugging info for this pod's DNS and confirm that a dns server 1.1.1.1 can be injected, along with ")
@@ -99,17 +99,17 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
defer ginkgo.GinkgoRecover()
ginkgo.By("finding the worker node that fulfills this test's assumptions")
-nodes := findPreconfiguredGmsaNodes(f.ClientSet)
+nodes := findPreconfiguredGmsaNodes(ctx, f.ClientSet)
if len(nodes) != 1 {
e2eskipper.Skipf("Expected to find exactly one node with the %q label, found %d", gmsaFullNodeLabel, len(nodes))
}
node := nodes[0]
ginkgo.By("retrieving the contents of the GMSACredentialSpec custom resource manifest from the node")
-crdManifestContents := retrieveCRDManifestFileContents(f, node)
+crdManifestContents := retrieveCRDManifestFileContents(ctx, f, node)
ginkgo.By("deploying the GMSA webhook")
-err := deployGmsaWebhook(f)
+err := deployGmsaWebhook(ctx, f)
if err != nil {
framework.Failf(err.Error())
}
@@ -121,26 +121,26 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
}
ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
-rbacRoleName, err := createRBACRoleForGmsa(f)
+rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating a service account")
-serviceAccountName := createServiceAccount(f)
+serviceAccountName := createServiceAccount(ctx, f)
ginkgo.By("binding the RBAC role to the service account")
-bindRBACRoleToServiceAccount(f, serviceAccountName, rbacRoleName)
+bindRBACRoleToServiceAccount(ctx, f, serviceAccountName, rbacRoleName)
ginkgo.By("creating a pod using the GMSA cred spec")
-podName := createPodWithGmsa(f, serviceAccountName)
+podName := createPodWithGmsa(ctx, f, serviceAccountName)
// nltest /QUERY will only return successfully if there is a GMSA
// identity configured, _and_ it succeeds in contacting the AD controller
// and authenticating with it.
ginkgo.By("checking that nltest /QUERY returns successfully")
var output string
-gomega.Eventually(func() bool {
+gomega.Eventually(ctx, func() bool {
output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY")
if err != nil {
framework.Logf("unable to run command in container via exec: %s", err)
@@ -166,17 +166,17 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
defer ginkgo.GinkgoRecover()
ginkgo.By("finding the worker node that fulfills this test's assumptions")
-nodes := findPreconfiguredGmsaNodes(f.ClientSet)
+nodes := findPreconfiguredGmsaNodes(ctx, f.ClientSet)
if len(nodes) != 1 {
e2eskipper.Skipf("Expected to find exactly one node with the %q label, found %d", gmsaFullNodeLabel, len(nodes))
}
node := nodes[0]
ginkgo.By("retrieving the contents of the GMSACredentialSpec custom resource manifest from the node")
-crdManifestContents := retrieveCRDManifestFileContents(f, node)
+crdManifestContents := retrieveCRDManifestFileContents(ctx, f, node)
ginkgo.By("deploying the GMSA webhook")
-err := deployGmsaWebhook(f)
+err := deployGmsaWebhook(ctx, f)
if err != nil {
framework.Failf(err.Error())
}
@@ -188,26 +188,26 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
}
ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
-rbacRoleName, err := createRBACRoleForGmsa(f)
+rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating a service account")
-serviceAccountName := createServiceAccount(f)
+serviceAccountName := createServiceAccount(ctx, f)
ginkgo.By("binding the RBAC role to the service account")
-bindRBACRoleToServiceAccount(f, serviceAccountName, rbacRoleName)
+bindRBACRoleToServiceAccount(ctx, f, serviceAccountName, rbacRoleName)
ginkgo.By("creating a pod using the GMSA cred spec")
-podName := createPodWithGmsa(f, serviceAccountName)
+podName := createPodWithGmsa(ctx, f, serviceAccountName)
ginkgo.By("getting the ip of GMSA domain")
gmsaDomainIP := getGmsaDomainIP(f, podName)
ginkgo.By("checking that file can be read and write from the remote folder successfully")
filePath := fmt.Sprintf("\\\\%s\\%s\\write-test-%s.txt", gmsaDomainIP, gmsaSharedFolder, string(uuid.NewUUID())[0:4])
-gomega.Eventually(func() bool {
+gomega.Eventually(ctx, func() bool {
// The filePath is a remote folder, do not change the format of it
_, _ = runKubectlExecInNamespace(f.Namespace.Name, podName, "--", "powershell.exe", "-Command", "echo 'This is a test file.' > "+filePath)
output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath)
@@ -229,11 +229,11 @@ func isValidOutput(output string) bool {
}
// findPreconfiguredGmsaNode finds node with the gmsaFullNodeLabel label on it.
-func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node {
+func findPreconfiguredGmsaNodes(ctx context.Context, c clientset.Interface) []v1.Node {
nodeOpts := metav1.ListOptions{
LabelSelector: gmsaFullNodeLabel,
}
-nodes, err := c.CoreV1().Nodes().List(context.TODO(), nodeOpts)
+nodes, err := c.CoreV1().Nodes().List(ctx, nodeOpts)
if err != nil {
framework.Failf("Unable to list nodes: %v", err)
}
@@ -245,7 +245,7 @@ func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node {
// on nodes with the gmsaFullNodeLabel label with that file's directory
// mounted on it, and then exec-ing into that pod to retrieve the file's
// contents.
-func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) string {
+func retrieveCRDManifestFileContents(ctx context.Context, f *framework.Framework, node v1.Node) string {
podName := "retrieve-gmsa-crd-contents"
// we can't use filepath.Dir here since the test itself runs on a Linux machine
splitPath := strings.Split(gmsaCrdManifestPath, `\`)
@@ -283,7 +283,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin
},
},
}
-e2epod.NewPodClient(f).CreateSync(pod)
+e2epod.NewPodClient(f).CreateSync(ctx, pod)
output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "cmd", "/S", "/C", fmt.Sprintf("type %s", gmsaCrdManifestPath))
if err != nil {
@@ -297,7 +297,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin
// deployGmsaWebhook deploys the GMSA webhook, and returns a cleanup function
// to be called when done with testing, that removes the temp files it's created
// on disks as well as the API resources it's created.
-func deployGmsaWebhook(f *framework.Framework) error {
+func deployGmsaWebhook(ctx context.Context, f *framework.Framework) error {
deployerName := "webhook-deployer"
deployerNamespace := f.Namespace.Name
webHookName := "gmsa-webhook"
@@ -317,8 +317,8 @@ func deployGmsaWebhook(f *framework.Framework) error {
})
// ensure the deployer has ability to approve certificatesigningrequests to install the webhook
-s := createServiceAccount(f)
-bindClusterRBACRoleToServiceAccount(f, s, "cluster-admin")
+s := createServiceAccount(ctx, f)
+bindClusterRBACRoleToServiceAccount(ctx, f, s, "cluster-admin")
installSteps := []string{
"echo \"@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing/\" >> /etc/apk/repositories",
@@ -357,11 +357,11 @@ func deployGmsaWebhook(f *framework.Framework) error {
},
},
}
-e2epod.NewPodClient(f).CreateSync(pod)
+e2epod.NewPodClient(f).CreateSync(ctx, pod)
// Wait for the Webhook deployment to become ready. The deployer pod takes a few seconds to initialize and create resources
err := waitForDeployment(func() (*appsv1.Deployment, error) {
-return f.ClientSet.AppsV1().Deployments(webHookNamespace).Get(context.TODO(), webHookName, metav1.GetOptions{})
+return f.ClientSet.AppsV1().Deployments(webHookNamespace).Get(ctx, webHookName, metav1.GetOptions{})
}, 10*time.Second, f.Timeouts.PodStart)
if err == nil {
framework.Logf("GMSA webhook successfully deployed")
@@ -370,7 +370,7 @@ func deployGmsaWebhook(f *framework.Framework) error {
}
// Dump deployer logs
-logs, _ := e2epod.GetPodLogs(f.ClientSet, deployerNamespace, deployerName, deployerName)
+logs, _ := e2epod.GetPodLogs(ctx, f.ClientSet, deployerNamespace, deployerName, deployerName)
framework.Logf("GMSA deployment logs:\n%s", logs)
return err
@@ -409,7 +409,7 @@ func createGmsaCustomResource(ns string, crdManifestContents string) error {
// createRBACRoleForGmsa creates an RBAC cluster role to grant use
// access to our test credential spec.
// It returns the role's name, as well as a function to delete it when done.
-func createRBACRoleForGmsa(f *framework.Framework) (string, error) {
+func createRBACRoleForGmsa(ctx context.Context, f *framework.Framework) (string, error) {
roleName := f.Namespace.Name + "-rbac-role"
role := &rbacv1.ClusterRole{
@@ -427,7 +427,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, error) {
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.RbacV1().ClusterRoles().Delete), roleName, metav1.DeleteOptions{})
-_, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
+_, err := f.ClientSet.RbacV1().ClusterRoles().Create(ctx, role, metav1.CreateOptions{})
if err != nil {
err = fmt.Errorf("unable to create RBAC cluster role %q: %w", roleName, err)
}
@@ -436,7 +436,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, error) {
}
// createServiceAccount creates a service account, and returns its name.
-func createServiceAccount(f *framework.Framework) string {
+func createServiceAccount(ctx context.Context, f *framework.Framework) string {
accountName := f.Namespace.Name + "-sa-" + string(uuid.NewUUID())
account := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
@@ -444,14 +444,14 @@ func createServiceAccount(f *framework.Framework) string {
Namespace: f.Namespace.Name,
},
}
-if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), account, metav1.CreateOptions{}); err != nil {
+if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, account, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create service account %q: %v", accountName, err)
}
return accountName
}
// bindRBACRoleToServiceAccount binds the given RBAC cluster role to the given service account.
-func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
+func bindRBACRoleToServiceAccount(ctx context.Context, f *framework.Framework, serviceAccountName, rbacRoleName string) {
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: f.Namespace.Name + "-rbac-binding",
@@ -470,11 +470,11 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb
Name: rbacRoleName,
},
}
-_, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+_, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(ctx, binding, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
-func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
+func bindClusterRBACRoleToServiceAccount(ctx context.Context, f *framework.Framework, serviceAccountName, rbacRoleName string) {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: f.Namespace.Name + "-rbac-binding",
@@ -493,12 +493,12 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountN
Name: rbacRoleName,
},
}
-_, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+_, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
// createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name.
-func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string {
+func createPodWithGmsa(ctx context.Context, f *framework.Framework, serviceAccountName string) string {
podName := "pod-with-gmsa"
credSpecName := gmsaCustomResourceName
@@ -527,7 +527,7 @@ func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string
},
},
}
-e2epod.NewPodClient(f).CreateSync(pod)
+e2epod.NewPodClient(f).CreateSync(ctx, pod)
return podName
}
@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() {
|
||||
}
|
||||
|
||||
ginkgo.By("creating a pod with correct GMSA specs")
|
||||
e2epod.NewPodClient(f).CreateSync(pod)
|
||||
e2epod.NewPodClient(f).CreateSync(ctx, pod)
|
||||
|
||||
ginkgo.By("checking the domain reported by nltest in the containers")
|
||||
namespaceOption := fmt.Sprintf("--namespace=%s", f.Namespace.Name)
|
||||
@@ -112,7 +112,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() {
|
||||
// even for bogus creds, `nltest /PARENTDOMAIN` simply returns the AD domain, which is enough for our purpose here.
|
||||
// note that the "eventually" part seems to be needed to account for the fact that powershell containers
|
||||
// are a bit slow to become responsive, even when docker reports them as running.
|
||||
gomega.Eventually(func() bool {
|
||||
gomega.Eventually(ctx, func() bool {
|
||||
output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN")
|
||||
return err == nil
|
||||
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())
|
||||
|
@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
ginkgo.It("should run as a process on the host/node", func(ctx context.Context) {
|
||||
|
||||
ginkgo.By("selecting a Windows node")
|
||||
targetNode, err := findWindowsNode(f)
|
||||
targetNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "Error finding Windows node")
|
||||
framework.Logf("Using node: %v", targetNode.Name)
|
||||
|
||||
@@ -128,14 +128,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for pod to run")
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Then ensuring pod finished running successfully")
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
|
||||
context.TODO(),
|
||||
ctx,
|
||||
podName,
|
||||
metav1.GetOptions{})
|
||||
|
||||
@@ -180,21 +180,21 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for pod to run")
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Then ensuring pod finished running successfully")
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
|
||||
context.TODO(),
|
||||
ctx,
|
||||
podName,
|
||||
metav1.GetOptions{})
|
||||
|
||||
framework.ExpectNoError(err, "Error retrieving pod")
|
||||
|
||||
if p.Status.Phase != v1.PodSucceeded {
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "read-configuration")
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "read-configuration")
|
||||
if err != nil {
|
||||
framework.Logf("Error pulling logs: %v", err)
|
||||
}
|
||||
@@ -212,7 +212,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-windows/1981-windows-privileged-container-support/README.md
|
||||
// for more details.
|
||||
ginkgo.By("Ensuring Windows nodes are running containerd v1.6.x")
|
||||
windowsNode, err := findWindowsNode(f)
|
||||
windowsNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "error finding Windows node")
|
||||
r, v, err := getNodeContainerRuntimeAndVersion(windowsNode)
|
||||
framework.ExpectNoError(err, "error getting node container runtime and version")
|
||||
@@ -418,14 +418,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
},
|
||||
}
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Waiting for pod '%s' to run", podName))
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Then ensuring pod finished running successfully")
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
|
||||
context.TODO(),
|
||||
ctx,
|
||||
podName,
|
||||
metav1.GetOptions{})
|
||||
|
||||
@@ -440,7 +440,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
"involvedObject.namespace": f.Namespace.Name,
|
||||
}.AsSelector().String(),
|
||||
}
|
||||
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options)
|
||||
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, options)
|
||||
framework.ExpectNoError(err, "Error getting events for failed pod")
|
||||
for _, event := range events.Items {
|
||||
framework.Logf("%s: %s", event.Reason, event.Message)
|
||||
@@ -468,7 +468,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
"validation-script": validation_script,
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(ctx, configMap, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "unable to create create configmap")
|
||||
|
||||
ginkgo.By("Creating a secret containing test data")
|
||||
@@ -485,7 +485,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
_, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
|
||||
_, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(ctx, secret, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "unable to create secret")
|
||||
|
||||
ginkgo.By("Creating a pod with a HostProcess container that uses various types of volume mounts")
|
||||
@@ -493,18 +493,18 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
podAndContainerName := "host-process-volume-mounts"
|
||||
pod := makeTestPodWithVolumeMounts(podAndContainerName)
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for pod to run")
|
||||
e2epod.NewPodClient(f).WaitForFinish(podAndContainerName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podAndContainerName, 3*time.Minute)
|
||||
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, ns.Name, podAndContainerName, podAndContainerName)
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns.Name, podAndContainerName, podAndContainerName)
|
||||
framework.ExpectNoError(err, "Error getting pod logs")
|
||||
framework.Logf("Container logs: %s", logs)
|
||||
|
||||
ginkgo.By("Then ensuring pod finished running successfully")
|
||||
p, err := f.ClientSet.CoreV1().Pods(ns.Name).Get(
|
||||
context.TODO(),
|
||||
ctx,
|
||||
podAndContainerName,
|
||||
metav1.GetOptions{})
|
||||
|
||||
@@ -514,12 +514,12 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
|
||||
ginkgo.It("metrics should report count of started and failed to start HostProcess containers", func(ctx context.Context) {
|
||||
ginkgo.By("Selecting a Windows node")
|
||||
targetNode, err := findWindowsNode(f)
|
||||
targetNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "Error finding Windows node")
|
||||
framework.Logf("Using node: %v", targetNode.Name)
|
||||
|
||||
ginkgo.By("Getting initial kubelet metrics values")
|
||||
beforeMetrics, err := getCurrentHostProcessMetrics(f, targetNode.Name)
|
||||
beforeMetrics, err := getCurrentHostProcessMetrics(ctx, f, targetNode.Name)
|
||||
framework.ExpectNoError(err, "Error getting initial kubelet metrics for node")
|
||||
framework.Logf("Initial HostProcess container metrics -- StartedContainers: %v, StartedContainersErrors: %v, StartedInitContainers: %v, StartedInitContainersErrors: %v",
|
||||
beforeMetrics.StartedContainersCount, beforeMetrics.StartedContainersErrorCount, beforeMetrics.StartedInitContainersCount, beforeMetrics.StartedInitContainersErrorCount)
|
||||
@@ -565,8 +565,8 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Scheduling a pod with a HostProcess container that will fail")
|
||||
podName = "host-process-metrics-pod-failing-container"
|
||||
@@ -599,12 +599,12 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Getting subsequent kubelet metrics values")
|
||||
|
||||
afterMetrics, err := getCurrentHostProcessMetrics(f, targetNode.Name)
|
||||
afterMetrics, err := getCurrentHostProcessMetrics(ctx, f, targetNode.Name)
|
||||
framework.ExpectNoError(err, "Error getting subsequent kubelet metrics for node")
|
||||
framework.Logf("Subsequent HostProcess container metrics -- StartedContainers: %v, StartedContainersErrors: %v, StartedInitContainers: %v, StartedInitContainersErrors: %v",
|
||||
afterMetrics.StartedContainersCount, afterMetrics.StartedContainersErrorCount, afterMetrics.StartedInitContainersCount, afterMetrics.StartedInitContainersErrorCount)
|
||||
@@ -620,7 +620,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
|
||||
ginkgo.It("container stats validation", func(ctx context.Context) {
|
||||
ginkgo.By("selecting a Windows node")
|
||||
targetNode, err := findWindowsNode(f)
|
||||
targetNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "Error finding Windows node")
|
||||
framework.Logf("Using node: %v", targetNode.Name)
|
||||
|
||||
@@ -651,14 +651,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for the pod to start running")
|
||||
timeout := 3 * time.Minute
|
||||
e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, timeout, make(map[string]string))
|
||||
e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout, make(map[string]string))
|
||||
|
||||
ginkgo.By("Getting container stats for pod")
|
||||
nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name)
|
||||
nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name)
|
||||
framework.ExpectNoError(err, "Error getting node stats")
|
||||
|
||||
statsChecked := false
|
||||
@@ -700,7 +700,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
ginkgo.It("should support querying api-server using in-cluster config", func(ctx context.Context) {
|
||||
// This functionality is only support on containerd v1.7+
|
||||
ginkgo.By("Ensuring Windows nodes are running containerd v1.7+")
|
||||
windowsNode, err := findWindowsNode(f)
|
||||
windowsNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "error finding Windows node")
|
||||
r, v, err := getNodeContainerRuntimeAndVersion(windowsNode)
|
||||
framework.ExpectNoError(err, "error getting node container runtime and version")
|
||||
@@ -748,10 +748,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
}
|
||||
|
||||
pc := e2epod.NewPodClient(f)
|
||||
pc.Create(pod)
|
||||
pc.Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for pod to run")
|
||||
e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string))
|
||||
e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string))
|
||||
|
||||
ginkgo.By("Waiting for 60 seconds")
|
||||
// We wait an additional 60 seconds after the pod is Running because the
|
||||
@@ -760,7 +760,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
time.Sleep(60 * time.Second)
|
||||
|
||||
ginkgo.By("Ensuring the test app was able to successfully query the api-server")
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "hpc-agnhost")
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "hpc-agnhost")
|
||||
framework.ExpectNoError(err, "Error getting pod logs")
|
||||
|
||||
framework.Logf("Logs: %s\n", logs)
|
||||
@@ -779,7 +779,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
ginkgo.It("should run as localgroup accounts", func(ctx context.Context) {
|
||||
// This functionality is only supported on containerd v1.7+
|
||||
ginkgo.By("Ensuring Windows nodes are running containerd v1.7+")
|
||||
windowsNode, err := findWindowsNode(f)
|
||||
windowsNode, err := findWindowsNode(ctx, f)
|
||||
framework.ExpectNoError(err, "error finding Windows node")
|
||||
r, v, err := getNodeContainerRuntimeAndVersion(windowsNode)
|
||||
framework.ExpectNoError(err, "error getting node container runtime and version")
|
||||
@@ -835,10 +835,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
},
|
||||
}
|
||||
|
||||
e2epod.NewPodClient(f).Create(pod)
|
||||
e2epod.NewPodClient(f).Create(ctx, pod)
|
||||
|
||||
ginkgo.By("Waiting for pod to run")
|
||||
e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
|
||||
e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
|
||||
|
||||
ginkgo.By("Then ensuring pod finished running successfully")
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
|
||||
@@ -854,7 +854,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
|
||||
// because all of the 'built-in' accounts that can be used with HostProcess
|
||||
// are prefixed with this.
|
||||
ginkgo.By("Then ensuring pod was not running as a system account")
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "localgroup-container")
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "localgroup-container")
|
||||
framework.ExpectNoError(err, "error retrieving container logs")
|
||||
framework.Logf("Pod logs: %s", logs)
|
||||
framework.ExpectEqual(
|
||||
@@ -1005,10 +1005,10 @@ type HostProcessContainersMetrics struct {
|
||||
|
||||
// getCurrentHostProcessMetrics returns a HostPRocessContainersMetrics object. Any metrics that do not have any
|
||||
// values reported will be set to 0.
|
||||
func getCurrentHostProcessMetrics(f *framework.Framework, nodeName string) (HostProcessContainersMetrics, error) {
|
||||
func getCurrentHostProcessMetrics(ctx context.Context, f *framework.Framework, nodeName string) (HostProcessContainersMetrics, error) {
|
||||
var result HostProcessContainersMetrics
|
||||
|
||||
metrics, err := e2emetrics.GetKubeletMetrics(f.ClientSet, nodeName)
|
||||
metrics, err := e2emetrics.GetKubeletMetrics(ctx, f.ClientSet, nodeName)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
@@ -58,44 +58,44 @@ var _ = SIGDescribe("Hybrid cluster network", func() {
linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS)
ginkgo.By("creating a linux pod and waiting for it to be running")
-linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod)
+linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod)
windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS)
windowsPod.Spec.Containers[0].Args = []string{"test-webserver"}
ginkgo.By("creating a windows pod and waiting for it to be running")
-windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod)
+windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod)
ginkgo.By("verifying pod internal connectivity to the cluster dataplane")
ginkgo.By("checking connectivity from Linux to Windows")
-assertConsistentConnectivity(f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck(windowsPod.Status.PodIP, 80))
+assertConsistentConnectivity(ctx, f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck(windowsPod.Status.PodIP, 80))
ginkgo.By("checking connectivity from Windows to Linux")
-assertConsistentConnectivity(f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck(linuxPod.Status.PodIP))
+assertConsistentConnectivity(ctx, f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck(linuxPod.Status.PodIP))
})
ginkgo.It("should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) {
linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS)
ginkgo.By("creating a linux pod and waiting for it to be running")
-linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod)
+linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod)
ginkgo.By("verifying pod external connectivity to the internet")
ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Linux")
-assertConsistentConnectivity(f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53))
+assertConsistentConnectivity(ctx, f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53))
})
ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) {
windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS)
ginkgo.By("creating a windows pod and waiting for it to be running")
-windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod)
+windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod)
ginkgo.By("verifying pod external connectivity to the internet")
ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Windows")
-assertConsistentConnectivity(f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck("www.google.com"))
+assertConsistentConnectivity(ctx, f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck("www.google.com"))
})
})
@@ -107,7 +107,7 @@ var (
timeoutSeconds = 10
)
-func assertConsistentConnectivity(f *framework.Framework, podName string, os string, cmd []string) {
+func assertConsistentConnectivity(ctx context.Context, f *framework.Framework, podName string, os string, cmd []string) {
connChecker := func() error {
ginkgo.By(fmt.Sprintf("checking connectivity of %s-container in %s", os, podName))
// TODO, we should be retrying this similar to what is done in DialFromNode, in the test/e2e/networking/networking.go tests
@@ -117,8 +117,8 @@ func assertConsistentConnectivity(f *framework.Framework, podName string, os str
}
return err
}
-gomega.Eventually(connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
-gomega.Consistently(connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
+gomega.Eventually(ctx, connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
+gomega.Consistently(ctx, connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
}
func linuxCheck(address string, port int) []string {
@@ -47,18 +47,18 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
ginkgo.It("should return within 10 seconds", func(ctx context.Context) {
ginkgo.By("Selecting a Windows node")
-targetNode, err := findWindowsNode(f)
+targetNode, err := findWindowsNode(ctx, f)
framework.ExpectNoError(err, "Error finding Windows node")
framework.Logf("Using node: %v", targetNode.Name)
ginkgo.By("Scheduling 10 pods")
powershellImage := imageutils.GetConfig(imageutils.BusyBox)
pods := newKubeletStatsTestPods(10, powershellImage, targetNode.Name)
-e2epod.NewPodClient(f).CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(ctx, pods)
ginkgo.By("Waiting up to 3 minutes for pods to be running")
timeout := 3 * time.Minute
-err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
framework.ExpectNoError(err)
ginkgo.By("Getting kubelet stats 5 times and checking average duration")
@@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
for i := 0; i < iterations; i++ {
start := time.Now()
-nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name)
+nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name)
duration := time.Since(start)
totalDurationMs += duration.Milliseconds()
@@ -122,7 +122,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
ginkgo.Context("when windows is booted", func() {
ginkgo.It("should return bootid within 10 seconds", func(ctx context.Context) {
ginkgo.By("Selecting a Windows node")
-targetNode, err := findWindowsNode(f)
+targetNode, err := findWindowsNode(ctx, f)
framework.ExpectNoError(err, "Error finding Windows node")
framework.Logf("Using node: %v", targetNode.Name)
@@ -138,18 +138,18 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
ginkgo.It("should return within 10 seconds", func(ctx context.Context) {
ginkgo.By("Selecting a Windows node")
-targetNode, err := findWindowsNode(f)
+targetNode, err := findWindowsNode(ctx, f)
framework.ExpectNoError(err, "Error finding Windows node")
framework.Logf("Using node: %v", targetNode.Name)
ginkgo.By("Scheduling 3 pods")
powershellImage := imageutils.GetConfig(imageutils.BusyBox)
pods := newKubeletStatsTestPods(3, powershellImage, targetNode.Name)
-e2epod.NewPodClient(f).CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(ctx, pods)
ginkgo.By("Waiting up to 3 minutes for pods to be running")
timeout := 3 * time.Minute
-err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
framework.ExpectNoError(err)
ginkgo.By("Getting kubelet stats 1 time")
@@ -158,7 +158,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
for i := 0; i < iterations; i++ {
start := time.Now()
-nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name)
+nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name)
duration := time.Since(start)
totalDurationMs += duration.Milliseconds()
@@ -206,9 +206,9 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
})
// findWindowsNode finds a Windows node that is Ready and Schedulable
-func findWindowsNode(f *framework.Framework) (v1.Node, error) {
+func findWindowsNode(ctx context.Context, f *framework.Framework) (v1.Node, error) {
selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
-nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return v1.Node{}, err
@@ -52,13 +52,13 @@ var _ = SIGDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", func() {
ginkgo.Context("Allocatable node memory", func() {
ginkgo.It("should be equal to a calculated allocatable memory value", func(ctx context.Context) {
-checkNodeAllocatableTest(f)
+checkNodeAllocatableTest(ctx, f)
})
})
ginkgo.Context("attempt to deploy past allocatable memory limits", func() {
ginkgo.It("should fail deployments of pods once there isn't enough memory", func(ctx context.Context) {
-overrideAllocatableMemoryTest(f, framework.TestContext.CloudConfig.NumNodes)
+overrideAllocatableMemoryTest(ctx, f, framework.TestContext.CloudConfig.NumNodes)
})
})
@@ -81,9 +81,9 @@ type nodeMemory struct {
// runDensityBatchTest runs the density batch pod creation test
// checks that a calculated value for NodeAllocatable is equal to the reported value
-func checkNodeAllocatableTest(f *framework.Framework) {
+func checkNodeAllocatableTest(ctx context.Context, f *framework.Framework) {
-nodeMem := getNodeMemory(f)
+nodeMem := getNodeMemory(ctx, f)
framework.Logf("nodeMem says: %+v", nodeMem)
// calculate the allocatable mem based on capacity - reserved amounts
@@ -101,9 +101,9 @@ func checkNodeAllocatableTest(f *framework.Framework) {
// Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable
// memory, then confirms that the last pod failed because of failedScheduling
-func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) {
+func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework, allocatablePods int) {
selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
-nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
LabelSelector: selector.String(),
})
framework.ExpectNoError(err)
@@ -133,7 +133,7 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int)
NodeName: node.Name,
},
}
-_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
podName := "mem-failure-pod"
@@ -158,10 +158,10 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int)
},
},
}
-failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), failurePod, metav1.CreateOptions{})
+failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, failurePod, metav1.CreateOptions{})
framework.ExpectNoError(err)
-gomega.Eventually(func() bool {
-eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+gomega.Eventually(ctx, func() bool {
+eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range eventList.Items {
// Look for an event that shows FailedScheduling
@@ -176,9 +176,9 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int)
}
// getNodeMemory populates a nodeMemory struct with information from the first
-func getNodeMemory(f *framework.Framework) nodeMemory {
+func getNodeMemory(ctx context.Context, f *framework.Framework) nodeMemory {
selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
-nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
LabelSelector: selector.String(),
})
framework.ExpectNoError(err)
@@ -193,7 +193,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory {
framework.Logf("Getting configuration details for node %s", nodeName)
request := f.ClientSet.CoreV1().RESTClient().Get().Resource("nodes").Name(nodeName).SubResource("proxy").Suffix("configz")
-rawbytes, err := request.DoRaw(context.Background())
+rawbytes, err := request.DoRaw(ctx)
framework.ExpectNoError(err)
kubeletConfig, err := decodeConfigz(rawbytes)
framework.ExpectNoError(err)
@@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
ginkgo.It("should run as a reboot process on the host/node", func(ctx context.Context) {
ginkgo.By("selecting a Windows node")
-targetNode, err := findWindowsNode(f)
+targetNode, err := findWindowsNode(ctx, f)
framework.ExpectNoError(err, "Error finding Windows node")
framework.Logf("Using node: %v", targetNode.Name)
@@ -74,7 +74,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
}
agnPod.Spec.Containers[0].Args = []string{"test-webserver"}
ginkgo.By("creating a windows pod and waiting for it to be running")
-agnPod = e2epod.NewPodClient(f).CreateSync(agnPod)
+agnPod = e2epod.NewPodClient(f).CreateSync(ctx, agnPod)
// Create Linux pod to ping the windows pod
linuxBusyBoxImage := imageutils.GetE2EImage(imageutils.Nginx)
@@ -107,16 +107,16 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
},
}
ginkgo.By("Waiting for the Linux pod to run")
-nginxPod = e2epod.NewPodClient(f).CreateSync(nginxPod)
+nginxPod = e2epod.NewPodClient(f).CreateSync(ctx, nginxPod)
ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Linux")
-assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck("8.8.8.8", 53))
+assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck("8.8.8.8", 53))
ginkgo.By("checking connectivity to www.google.com from Windows")
-assertConsistentConnectivity(f, agnPod.ObjectMeta.Name, "windows", windowsCheck("www.google.com"))
+assertConsistentConnectivity(ctx, f, agnPod.ObjectMeta.Name, "windows", windowsCheck("www.google.com"))
ginkgo.By("checking connectivity from Linux to Windows for the first time")
-assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPod.Status.PodIP, 80))
+assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPod.Status.PodIP, 80))
initialRestartCount := podutil.GetExistingContainerStatus(agnPod.Status.ContainerStatuses, "windows-container").RestartCount
@@ -156,14 +156,14 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
},
}
-e2epod.NewPodClient(f).Create(pod)
+e2epod.NewPodClient(f).Create(ctx, pod)
ginkgo.By("Waiting for pod to run")
-e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute)
+e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute)
ginkgo.By("Then ensuring pod finished running successfully")
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
-context.TODO(),
+ctx,
podName,
metav1.GetOptions{})
@@ -185,7 +185,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
break FOR
}
ginkgo.By("Then checking existed agn-test-pod is running on the rebooted host")
-agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), agnPod.Name, metav1.GetOptions{})
+agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, agnPod.Name, metav1.GetOptions{})
if err == nil {
lastRestartCount := podutil.GetExistingContainerStatus(agnPodOut.Status.ContainerStatuses, "windows-container").RestartCount
restartCount = int(lastRestartCount - initialRestartCount)
@@ -197,10 +197,10 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
ginkgo.By("Checking whether agn-test-pod is rebooted")
framework.ExpectEqual(restartCount, 1)
-agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), agnPod.Name, metav1.GetOptions{})
+agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, agnPod.Name, metav1.GetOptions{})
framework.ExpectEqual(agnPodOut.Status.Phase, v1.PodRunning)
framework.ExpectNoError(err, "getting pod info after reboot")
-assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPodOut.Status.PodIP, 80))
+assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPodOut.Status.PodIP, 80))
// create another host process pod to check system boot time
checkPod := &v1.Pod{
@@ -239,14 +239,14 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
},
}
-e2epod.NewPodClient(f).Create(checkPod)
+e2epod.NewPodClient(f).Create(ctx, checkPod)
ginkgo.By("Waiting for pod to run")
-e2epod.NewPodClient(f).WaitForFinish("check-reboot-pod", 3*time.Minute)
+e2epod.NewPodClient(f).WaitForFinish(ctx, "check-reboot-pod", 3*time.Minute)
ginkgo.By("Then ensuring pod finished running successfully")
p, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
-context.TODO(),
+ctx,
"check-reboot-pod",
metav1.GetOptions{})
@@ -47,15 +47,15 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
ginkgo.It("should be able create pods and run containers with a given username", func(ctx context.Context) {
ginkgo.By("Creating 2 pods: 1 with the default user, and one with a custom one.")
podDefault := runAsUserNamePod(nil)
e2eoutput.TestContainerOutput(f, "check default user", podDefault, 0, []string{"ContainerUser"})
e2eoutput.TestContainerOutput(ctx, f, "check default user", podDefault, 0, []string{"ContainerUser"})

podUserName := runAsUserNamePod(toPtr("ContainerAdministrator"))
e2eoutput.TestContainerOutput(f, "check set user", podUserName, 0, []string{"ContainerAdministrator"})
e2eoutput.TestContainerOutput(ctx, f, "check set user", podUserName, 0, []string{"ContainerAdministrator"})
})

ginkgo.It("should not be able to create pods with unknown usernames at Pod level", func(ctx context.Context) {
ginkgo.By("Creating a pod with an invalid username")
podInvalid := e2epod.NewPodClient(f).Create(runAsUserNamePod(toPtr("FooLish")))
podInvalid := e2epod.NewPodClient(f).Create(ctx, runAsUserNamePod(toPtr("FooLish")))

failedSandboxEventSelector := fields.Set{
"involvedObject.kind": "Pod",
@@ -72,8 +72,8 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
// Not all runtimes use the sandbox information. This means the test needs to check if the pod
// sandbox failed or workload pod failed.
framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name)
gomega.Eventually(func() bool {
failedSandbox, err := eventOccurred(f.ClientSet, podInvalid.Namespace, failedSandboxEventSelector, hcsschimError)
gomega.Eventually(ctx, func(ctx context.Context) bool {
failedSandbox, err := eventOccurred(ctx, f.ClientSet, podInvalid.Namespace, failedSandboxEventSelector, hcsschimError)
if err != nil {
framework.Logf("Error retrieving events for pod. Ignoring...")
}
@@ -83,7 +83,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
}

framework.Logf("No Sandbox error found. Looking for failure in workload pods")
pod, err := e2epod.NewPodClient(f).Get(context.Background(), podInvalid.Name, metav1.GetOptions{})
pod, err := e2epod.NewPodClient(f).Get(ctx, podInvalid.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error retrieving pod: %s", err)
return false
@@ -104,12 +104,12 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
ginkgo.By("Creating a pod with an invalid username at container level and pod running as ContainerUser")
p := runAsUserNamePod(toPtr("FooLish"))
p.Spec.SecurityContext.WindowsOptions.RunAsUserName = toPtr("ContainerUser")
podInvalid := e2epod.NewPodClient(f).Create(p)
podInvalid := e2epod.NewPodClient(f).Create(ctx, p)

framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name)
framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podInvalid.Name, "", f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, podInvalid.Name, "", f.Namespace.Name))

podInvalid, _ = e2epod.NewPodClient(f).Get(context.TODO(), podInvalid.Name, metav1.GetOptions{})
podInvalid, _ = e2epod.NewPodClient(f).Get(ctx, podInvalid.Name, metav1.GetOptions{})
podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName]
if podTerminatedReason != "ContainerCannotRun" && podTerminatedReason != "StartError" {
framework.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun' or 'StartError', not: '%q'", podTerminatedReason)
@@ -127,8 +127,8 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
Command: []string{"cmd", "/S", "/C", "echo %username%"},
})

e2eoutput.TestContainerOutput(f, "check overridden username", pod, 0, []string{"ContainerUser"})
e2eoutput.TestContainerOutput(f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"})
e2eoutput.TestContainerOutput(ctx, f, "check overridden username", pod, 0, []string{"ContainerUser"})
e2eoutput.TestContainerOutput(ctx, f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"})
})

ginkgo.It("should ignore Linux Specific SecurityContext if set", func(ctx context.Context) {
@@ -147,11 +147,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
SELinuxOptions: &v1.SELinuxOptions{Level: "s0:c24,c9"},
WindowsOptions: &v1.WindowsSecurityContextOptions{RunAsUserName: &containerUserName}}
windowsPodWithSELinux.Spec.Tolerations = []v1.Toleration{{Key: "os", Value: "Windows"}}
windowsPodWithSELinux, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
windowsPodWithSELinux, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
windowsPodWithSELinux, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.Logf("Created pod %v", windowsPodWithSELinux)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, windowsPodWithSELinux.Name,
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, windowsPodWithSELinux.Name,
f.Namespace.Name), "failed to wait for pod %s to be running", windowsPodWithSELinux.Name)
})

@@ -161,11 +161,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
p := runAsUserNamePod(toPtr("ContainerAdministrator"))
p.Spec.SecurityContext.RunAsNonRoot = &trueVar

podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), p, metav1.CreateOptions{})
podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, p, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pod")

ginkgo.By("Waiting for pod to finish")
event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid)
event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, podInvalid)
framework.ExpectNoError(err)
framework.ExpectNotEqual(event, nil, "event should not be empty")
framework.Logf("Got event: %v", event)
@@ -179,11 +179,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() {
p := runAsUserNamePod(toPtr("CONTAINERADMINISTRATOR"))
p.Spec.SecurityContext.RunAsNonRoot = &trueVar

podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), p, metav1.CreateOptions{})
podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, p, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pod")

ginkgo.By("Waiting for pod to finish")
event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid)
event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, podInvalid)
framework.ExpectNoError(err)
framework.ExpectNotEqual(event, nil, "event should not be empty")
framework.Logf("Got event: %v", event)
@@ -226,10 +226,10 @@ func toPtr(s string) *string {
return &s
}

func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) (bool, error) {
func eventOccurred(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string) (bool, error) {
options := metav1.ListOptions{FieldSelector: eventSelector}

events, err := c.CoreV1().Events(namespace).List(context.TODO(), options)
events, err := c.CoreV1().Events(namespace).List(ctx, options)
if err != nil {
return false, fmt.Errorf("got error while getting events: %v", err)
}

@@ -51,11 +51,11 @@ var _ = SIGDescribe("Services", func() {
ns := f.Namespace.Name

jig := e2eservice.NewTestJig(cs, ns, serviceName)
nodeIP, err := e2enode.PickIP(jig.Client)
nodeIP, err := e2enode.PickIP(ctx, jig.Client)
framework.ExpectNoError(err)

ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
})
framework.ExpectNoError(err)
@@ -69,20 +69,20 @@ var _ = SIGDescribe("Services", func() {
"kubernetes.io/os": "windows",
}
}
_, err = jig.Run(windowsNodeSelectorTweak)
_, err = jig.Run(ctx, windowsNodeSelectorTweak)
framework.ExpectNoError(err)

//using hybrid_network methods
ginkgo.By("creating Windows testing Pod")
testPod := createTestPod(f, windowsBusyBoximage, windowsOS)
testPod = e2epod.NewPodClient(f).CreateSync(testPod)
testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

ginkgo.By("verifying that pod has the correct nodeSelector")
// Admission controllers may sometimes do the wrong thing
framework.ExpectEqual(testPod.Spec.NodeSelector["kubernetes.io/os"], "windows")

ginkgo.By(fmt.Sprintf("checking connectivity Pod to curl http://%s:%d", nodeIP, nodePort))
assertConsistentConnectivity(f, testPod.ObjectMeta.Name, windowsOS, windowsCheck(fmt.Sprintf("http://%s", net.JoinHostPort(nodeIP, strconv.Itoa(nodePort)))))
assertConsistentConnectivity(ctx, f, testPod.ObjectMeta.Name, windowsOS, windowsCheck(fmt.Sprintf("http://%s", net.JoinHostPort(nodeIP, strconv.Itoa(nodePort)))))

})

@@ -69,26 +69,26 @@ var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() {
ginkgo.It("container should have readOnly permissions on emptyDir", func(ctx context.Context) {

ginkgo.By("creating a container with readOnly permissions on emptyDir volume")
doReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
doReadOnlyTest(ctx, f, emptyDirSource, emptyDirVolumePath)

ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on emptyDir volume")
doReadWriteReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
doReadWriteReadOnlyTest(ctx, f, emptyDirSource, emptyDirVolumePath)
})

ginkgo.It("container should have readOnly permissions on hostMapPath", func(ctx context.Context) {

ginkgo.By("creating a container with readOnly permissions on hostMap volume")
doReadOnlyTest(f, hostMapSource, hostMapPath)
doReadOnlyTest(ctx, f, hostMapSource, hostMapPath)

ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on hostMap volume")
doReadWriteReadOnlyTest(f, hostMapSource, hostMapPath)
doReadWriteReadOnlyTest(ctx, f, hostMapSource, hostMapPath)
})

})

})

func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
func doReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
var (
filePath = volumePath + "\\test-file.txt"
podName = "pod-" + string(uuid.NewUUID())
@@ -98,7 +98,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s
"kubernetes.io/os": "windows",
}

pod = e2epod.NewPodClient(f).CreateSync(pod)
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("verifying that pod has the correct nodeSelector")
framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")

@@ -109,7 +109,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s
framework.ExpectEqual(stderr, "Access is denied.")
}

func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
var (
filePath = volumePath + "\\test-file" + string(uuid.NewUUID())
podName = "pod-" + string(uuid.NewUUID())
@@ -132,7 +132,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol
}

pod.Spec.Containers = append(pod.Spec.Containers, rwcontainer)
pod = e2epod.NewPodClient(f).CreateSync(pod)
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)

ginkgo.By("verifying that pod has the correct nodeSelector")
framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
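
Every hunk above applies the same mechanical pattern: a helper takes the test's ctx as its first argument and passes it to each API call or poll instead of context.TODO()/context.Background(), so the call chain unwinds as soon as the test is aborted. The following is a minimal, self-contained sketch of that pattern in plain Go; it is not taken from the diff, and waitForPodGone and podExists are hypothetical stand-ins for the framework helpers.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForPodGone follows the same convention as the diff: it accepts ctx and
// stops polling the moment the context is cancelled.
func waitForPodGone(ctx context.Context, podExists func(context.Context) (bool, error)) error {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			// An aborted test cancels ctx; returning its error here is what
			// makes the helper exit immediately instead of running to its
			// own timeout.
			return ctx.Err()
		case <-ticker.C:
			exists, err := podExists(ctx)
			if err != nil {
				return err
			}
			if !exists {
				return nil
			}
		}
	}
}

func main() {
	// Simulate a test that gets aborted shortly after it starts.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(200 * time.Millisecond)
		cancel()
	}()

	err := waitForPodGone(ctx, func(context.Context) (bool, error) {
		return true, nil // the "pod" never goes away
	})
	if errors.Is(err, context.Canceled) {
		fmt.Println("helper returned as soon as the test context was cancelled")
	}
}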