generated: run refactor

Author: Mike Danese
Date: 2020-02-07 18:16:47 -08:00
Parent: 7e88d8db66
Commit: 3aa59f7f30
697 changed files with 4380 additions and 3806 deletions
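
This generated refactor accompanies the client-go API change (Kubernetes 1.18 era) in which request methods such as Get, List, Create, and Delete gained a context.Context as their first parameter; each call site below is mechanically updated to pass context.TODO() as a placeholder until a real context can be plumbed through. A minimal sketch of the new call shape, assuming a standard client-go clientset (the helper and its names are illustrative, not part of this commit):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPodPhase is an illustrative helper (not from this commit) showing the
// post-refactor call shape: context.Context is the first argument to every
// request method, and context.TODO() marks call sites where a real context
// has not been plumbed through yet.
func getPodPhase(client kubernetes.Interface, ns, name string) error {
	// Pre-refactor: client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
	pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println(pod.Status.Phase)
	return nil
}

Every hunk below is an instance of this one-line transformation: lines marked - are the pre-refactor call sites, and lines marked + insert the context argument.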

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
@@ -86,7 +87,7 @@ func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
-	pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
+	pod, err := f.PodClient().Get(context.TODO(), t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod")
framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running")
gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
@@ -100,7 +101,7 @@ func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
ginkgo.By("Verifying nodes are AppArmor enabled")
-	nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list nodes")
for _, node := range nodes.Items {
gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"github.com/onsi/ginkgo"
appsv1 "k8s.io/api/apps/v1"
@@ -79,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a DaemonSet")
var err error
-	if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
+	if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(context.TODO(), t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
@@ -126,7 +127,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
}
func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
-	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -146,7 +147,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
selector := labels.Set(labelSet).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
-	podList, err := f.ClientSet.CoreV1().Pods(namespace).List(options)
+	podList, err := f.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), options)
if err != nil {
return false, err
}
@@ -175,7 +176,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma
}
func checkDaemonStatus(f *framework.Framework, namespace string, dsName string) (bool, error) {
-	ds, err := f.ClientSet.AppsV1().DaemonSets(namespace).Get(dsName, metav1.GetOptions{})
+	ds, err := f.ClientSet.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
return false, err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"time"
@@ -66,7 +67,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
-	deployment, err := deploymentClient.Create(d)
+	deployment, err := deploymentClient.Create(context.TODO(), d)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
@@ -75,7 +76,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err)
-	rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
+	rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 1 {
@@ -97,7 +98,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
-	rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
+	rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
if len(rss) != 2 {
@@ -131,7 +132,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
-	deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
+	deployment, err := deploymentClient.Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
@@ -140,7 +141,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
-	rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
+	rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 2 {
@@ -181,7 +182,7 @@ func (t *DeploymentUpgradeTest) Teardown(f *framework.Framework) {
// waitForDeploymentRevision waits for a deployment to reach the target revision.
func waitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
-	deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+	deployment, err := c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"strings"
@@ -75,7 +76,7 @@ func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName}))
options := metav1.ListOptions{LabelSelector: label.String()}
-	pods, err := c.CoreV1().Pods(ns).List(options)
+	pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
return err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"time"
@@ -58,7 +59,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := newReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
-	rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
+	rs, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), replicaSet)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
@@ -79,7 +80,7 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the RS is the same (survives) after the upgrade
ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
-	upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
+	upgradedRS, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"github.com/onsi/ginkgo"
appsv1 "k8s.io/api/apps/v1"
@@ -84,12 +85,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
e2esset.PauseNewPods(t.set)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
-	_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
+	_, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service)
framework.ExpectNoError(err)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
-	_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
+	_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), t.set)
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + t.set.Name)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -150,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
// getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
-	svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
+	svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{})
framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"k8s.io/api/core/v1"
@@ -57,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a ConfigMap")
var err error
-	if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
+	if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), t.configMap); err != nil {
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -142,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
}
func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
-	svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
+	svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{})
framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"time"
@@ -230,11 +231,11 @@ func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
-	return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
+	return c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts)
}
func getKubeProxyDaemonSet(c clientset.Interface) (*appsv1.DaemonSetList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
-	return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(listOpts)
+	return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(context.TODO(), listOpts)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -65,7 +66,7 @@ func mysqlKubectlCreate(ns, file string) {
}
func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
-	svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
+	svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{})
framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"k8s.io/api/core/v1"
@@ -55,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a secret")
var err error
-	if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
+	if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), t.secret); err != nil {
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"fmt"
"time"
@@ -87,10 +88,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
-	t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
+	t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(context.TODO(), t.pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-	t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
+	t.pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), t.pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Consuming the PVC before downgrade")
@@ -120,7 +121,7 @@ func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) {
framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, t.pod))
ginkgo.By("Deleting the PVC")
-	framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil))
+	framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(context.TODO(), t.pvc.Name, nil))
ginkgo.By("Waiting for the PV to be deleted")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute))

View File

@@ -17,6 +17,7 @@ limitations under the License.
package upgrades
import (
"context"
"fmt"
"github.com/onsi/ginkgo"
@@ -53,13 +54,13 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
switch upgrade {
case MasterUpgrade, ClusterUpgrade:
ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
-	pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
+	pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(context.TODO(), t.validPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
}
ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
-	pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
+	pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(context.TODO(), t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}