add import-alias for k8s.io/api/apps/v1

Aaron Crickenberger 2019-06-20 11:39:40 -07:00
parent 1aa22b8e22
commit 0aae740ede
31 changed files with 308 additions and 307 deletions

View File

@ -1,4 +1,5 @@
 {
   "k8s.io/api/admissionregistration/v1beta1": "admissionregistrationv1beta1",
-  "k8s.io/api/admission/v1beta1": "admissionv1beta1"
+  "k8s.io/api/admission/v1beta1": "admissionv1beta1",
+  "k8s.io/api/apps/v1": "appsv1"
 }
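
Each key in this mapping is an import path and each value is the alias that Go files are expected to use for it, which is what the rest of this commit enforces. A minimal sketch of what the new entry mandates in Go source (the surrounding package and function are illustrative only, not part of the commit):

package example

import (
	appsv1 "k8s.io/api/apps/v1" // alias required by the new mapping entry
)

// emptyDeployment shows the mandated alias in use.
func emptyDeployment(name string) *appsv1.Deployment {
	d := &appsv1.Deployment{}
	d.Name = name // ObjectMeta is embedded, so Name is settable directly
	return d
}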

View File

@ -24,7 +24,7 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -238,18 +238,18 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
},
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{

View File

@ -22,7 +22,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -278,18 +278,18 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
Image: image,
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentCRDName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{

View File

@ -21,7 +21,7 @@ import (
"sync/atomic"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
@ -113,17 +113,17 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
}
}
func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *apps.Deployment {
func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment {
replicas := int32(2)
return &apps.Deployment{
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpec(labels),
},

View File

@ -19,7 +19,7 @@ package apimachinery
import (
"fmt"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@ -350,18 +350,18 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
Image: image,
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{

View File

@ -22,7 +22,7 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -280,7 +280,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)
@ -293,7 +293,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(first.Revision).To(gomega.Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
@ -316,7 +316,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
gomega.Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
gomega.Expect(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
})
@ -329,7 +329,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)
@ -342,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(cur.Revision).To(gomega.Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
@ -371,7 +371,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
})
@ -387,7 +387,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)
@ -398,7 +398,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent"
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)
@ -430,7 +430,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))
e2elog.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
framework.ExpectNoError(err)
@ -456,12 +456,12 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
}
func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
return &apps.DaemonSet{
func newDaemonSet(dsName, image string, label map[string]string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: dsName,
},
Spec: apps.DaemonSetSpec{
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: label,
},
@ -576,7 +576,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
return newNode, nil
}
func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) {
func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
@ -615,14 +615,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
}
}
func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
return func() (bool, error) {
nodeNames := schedulableNodes(f.ClientSet, ds)
return checkDaemonPodOnNodes(f, ds, nodeNames)()
}
}
func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
@ -649,7 +649,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
}
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)
@ -661,7 +661,7 @@ func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
return fit
}
func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
}
@ -677,7 +677,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
return nil
}
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
return func() (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
if err != nil {
@ -718,7 +718,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
for _, pod := range podList.Items {
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
podHash := pod.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(len(podHash)).To(gomega.BeNumerically(">", 0))
if len(hash) > 0 {
gomega.Expect(podHash).To(gomega.Equal(hash))
@ -744,7 +744,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
}
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
@ -753,13 +753,13 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
return historyList
}
func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision {
var curHistory *apps.ControllerRevision
func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet) *appsv1.ControllerRevision {
var curHistory *appsv1.ControllerRevision
foundCurHistories := 0
for i := range historyList.Items {
history := &historyList.Items[i]
// Every history should have the hash label
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
gomega.Expect(len(history.Labels[appsv1.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
framework.ExpectNoError(err)
if match {

View File

@ -25,7 +25,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -53,7 +53,7 @@ const (
)
var (
nilRs *apps.ReplicaSet
nilRs *appsv1.ReplicaSet
)
var _ = SIGDescribe("Deployment", func() {
@ -234,7 +234,7 @@ func testDeleteDeployment(f *framework.Framework) {
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
e2elog.Logf("Creating simple deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
@ -281,7 +281,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"
e2elog.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
@ -310,7 +310,7 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up redis pods.
deploymentName := "test-recreate-deployment"
e2elog.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, appsv1.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
@ -325,7 +325,7 @@ func testRecreateDeployment(f *framework.Framework) {
// Update deployment to delete redis pods and bring up nginx pods.
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
@ -396,7 +396,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
@ -436,10 +436,10 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
deploymentReplicas := int32(1)
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
e2elog.Logf("Creating deployment %q", deploymentName)
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
}
@ -469,7 +469,7 @@ func testRolloverDeployment(f *framework.Framework) {
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@ -499,7 +499,7 @@ func testRolloverDeployment(f *framework.Framework) {
ensureReplicas(newRS, int32(0))
}
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas))
gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
}
@ -519,7 +519,7 @@ func testRollbackDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
deploymentReplicas := int32(1)
deploymentImage := NginxImage
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
e2elog.Logf("Creating deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
createAnnotation := map[string]string{"action": "create", "author": "node"}
@ -542,7 +542,7 @@ func testRollbackDeployment(f *framework.Framework) {
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation
@ -645,7 +645,7 @@ func testRollbackDeployment(f *framework.Framework) {
framework.ExpectNoError(err)
}
func randomScale(d *apps.Deployment, i int) {
func randomScale(d *appsv1.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:
e2elog.Logf("%02d: scaling up", i)
@ -670,7 +670,7 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx"
thirty := int32(30)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@ -688,7 +688,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@ -698,18 +698,18 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
update.Annotations[apps.DeprecatedRollbackTo] = "0"
update.Annotations[appsv1.DeprecatedRollbackTo] = "0"
})
framework.ExpectNoError(err)
case n < 0.6:
// just scaling
e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
randomScale(update, i)
})
framework.ExpectNoError(err)
@ -718,14 +718,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
framework.ExpectNoError(err)
} else {
e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@ -762,7 +762,7 @@ func testIterativeDeployments(f *framework.Framework) {
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment.Spec.Paused {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
})
}
@ -776,7 +776,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.ExpectNoError(err)
e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, appsv1.DeploymentProgressing)
framework.ExpectNoError(err)
}
@ -788,7 +788,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
e2elog.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
@ -815,7 +815,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
@ -845,8 +845,8 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@ -873,7 +873,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
framework.ExpectNoError(err)
@ -938,7 +938,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Replicas = &newReplicas
})
framework.ExpectNoError(err)
@ -986,7 +986,7 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
}
}
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList {
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
@ -995,7 +995,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
return rsList
}
func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))

View File

@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -340,12 +340,12 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
}
}
rs := &apps.ReplicaSet{
rs := &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "rs",
Namespace: ns,
},
Spec: apps.ReplicaSetSpec{
Spec: appsv1.ReplicaSetSpec{
Replicas: &size,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},

View File

@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@ -38,14 +38,14 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *apps.ReplicaSet {
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *appsv1.ReplicaSet {
zero := int64(0)
return &apps.ReplicaSet{
return &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
Labels: rsPodLabels,
},
Spec: apps.ReplicaSetSpec{
Spec: appsv1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: rsPodLabels,
},
@ -220,7 +220,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure)
return cond != nil, nil
})
@ -230,7 +230,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})
@ -250,7 +250,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure)
return cond == nil, nil
})
if err == wait.ErrWaitTimeout {

View File

@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
@ -71,7 +71,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
headlessSvcName := "test"
var statefulPodMounts, podMounts []v1.VolumeMount
var ss *apps.StatefulSet
var ss *appsv1.StatefulSet
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
@ -285,10 +285,10 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
return &apps.RollingUpdateStatefulSetStrategy{
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(3)
return &i
@ -305,11 +305,11 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
newImage := NewNginxImage
@ -317,7 +317,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)
@ -336,30 +336,30 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
ginkgo.By("Performing a canary update")
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
return &apps.RollingUpdateStatefulSetStrategy{
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
}()}
}(),
}
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
return &apps.RollingUpdateStatefulSetStrategy{
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
@ -377,11 +377,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
} else {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
@ -390,11 +390,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
updateRevision))
}
}
@ -413,11 +413,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
} else {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
@ -426,23 +426,23 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
updateRevision))
}
}
ginkgo.By("Performing a phased rolling update")
for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
j := int32(i)
return &apps.RollingUpdateStatefulSetStrategy{
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: &j,
}
}(),
@ -458,11 +458,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
} else {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
@ -471,11 +471,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
updateRevision))
}
}
@ -496,8 +496,8 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.OnDeleteStatefulSetStrategyType,
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)
@ -509,11 +509,11 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
@ -525,11 +525,11 @@ var _ = SIGDescribe("StatefulSet", func() {
ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
pods = sst.GetPodList(ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
newImage := NewNginxImage
@ -537,7 +537,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)
@ -562,11 +562,11 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
updateRevision))
}
})
@ -666,7 +666,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
@ -885,7 +885,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
}
type statefulPodTester interface {
deploy(ns string) *apps.StatefulSet
deploy(ns string) *appsv1.StatefulSet
write(statefulPodIndex int, kv map[string]string)
read(statefulPodIndex int, key string) string
name() string
@ -922,7 +922,7 @@ func (c *clusterAppTester) run() {
}
type zookeeperTester struct {
ss *apps.StatefulSet
ss *appsv1.StatefulSet
tester *framework.StatefulSetTester
}
@ -930,7 +930,7 @@ func (z *zookeeperTester) name() string {
return "zookeeper"
}
func (z *zookeeperTester) deploy(ns string) *apps.StatefulSet {
func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet {
z.ss = z.tester.CreateStatefulSet(zookeeperManifestPath, ns)
return z.ss
}
@ -952,7 +952,7 @@ func (z *zookeeperTester) read(statefulPodIndex int, key string) string {
}
type mysqlGaleraTester struct {
ss *apps.StatefulSet
ss *appsv1.StatefulSet
tester *framework.StatefulSetTester
}
@ -968,7 +968,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}
func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
@ -995,7 +995,7 @@ func (m *mysqlGaleraTester) read(statefulPodIndex int, key string) string {
}
type redisTester struct {
ss *apps.StatefulSet
ss *appsv1.StatefulSet
tester *framework.StatefulSetTester
}
@ -1008,7 +1008,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string {
return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}
func (m *redisTester) deploy(ns string) *apps.StatefulSet {
func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = m.tester.CreateStatefulSet(redisManifestPath, ns)
return m.ss
}
@ -1026,7 +1026,7 @@ func (m *redisTester) read(statefulPodIndex int, key string) string {
}
type cockroachDBTester struct {
ss *apps.StatefulSet
ss *appsv1.StatefulSet
tester *framework.StatefulSetTester
}
@ -1039,7 +1039,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}
func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet {
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{
@ -1087,7 +1087,7 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
@ -1100,11 +1100,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
@ -1116,7 +1116,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)
@ -1147,11 +1147,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
updateRevision))
}
@ -1161,7 +1161,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
framework.ExpectNoError(err)
@ -1192,11 +1192,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision),
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
priorRevision))
}
}

View File

@ -22,7 +22,7 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@ -203,7 +203,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), appsv1.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "failed to create audit-deployment")

View File

@ -23,7 +23,7 @@ import (
"github.com/pkg/errors"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
storage "k8s.io/api/storage/v1"
@ -353,9 +353,9 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
}
case *v1.Service:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.StatefulSet:
case *appsv1.StatefulSet:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.DaemonSet:
case *appsv1.DaemonSet:
f.PatchNamespace(&item.ObjectMeta.Namespace)
default:
return errors.Errorf("missing support for patching item of type %T", item)
@ -497,11 +497,11 @@ func (*serviceFactory) Create(f *Framework, i interface{}) (func() error, error)
type statefulSetFactory struct{}
func (f *statefulSetFactory) New() runtime.Object {
return &apps.StatefulSet{}
return &appsv1.StatefulSet{}
}
func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.StatefulSet)
item, ok := i.(*appsv1.StatefulSet)
if !ok {
return nil, errorItemNotSupported
}
@ -518,11 +518,11 @@ func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, er
type daemonSetFactory struct{}
func (f *daemonSetFactory) New() runtime.Object {
return &apps.DaemonSet{}
return &appsv1.DaemonSet{}
}
func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.DaemonSet)
item, ok := i.(*appsv1.DaemonSet)
if !ok {
return nil, errorItemNotSupported
}

View File

@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -39,7 +39,7 @@ import (
)
// UpdateDeploymentWithRetries updates the specified deployment with retries.
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*appsv1.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, poll, pollShortTimeout)
}
@ -50,8 +50,8 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
@ -63,7 +63,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*apps.Deployment)
d := event.Object.(*appsv1.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
@ -92,17 +92,17 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
}
// NewDeployment returns a deployment spec with the specified argument.
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {
zero := int64(0)
return &apps.Deployment{
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: apps.DeploymentStrategy{
Strategy: appsv1.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
@ -125,7 +125,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
}
// CreateDeployment creates a deployment.
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) {
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
@ -140,7 +140,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
}
// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
if err != nil {
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
@ -151,7 +151,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deploymen
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*apps.ReplicaSet{replicaSet}
rsList := []*appsv1.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
@ -169,18 +169,18 @@ func RunDeployment(config testutils.DeploymentConfig) error {
// testDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *appsv1.Deployment {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
zero := int64(0)
deploymentName := "deployment-" + string(uuid.NewUUID())
deploymentSpec := &apps.Deployment{
deploymentSpec := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: namespace,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,

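For reference, a minimal caller-side sketch of the renamed alias in use with NewDeployment; the e2edeploy import path, the function name exampleDeployment, and the fixture values are assumptions for illustration, not part of this commit:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// exampleDeployment builds a two-replica nginx Deployment fixture; the
// appsv1 alias now supplies both the return type and the strategy constant.
func exampleDeployment() *appsv1.Deployment {
	return e2edeploy.NewDeployment("web", int32(2),
		map[string]string{"app": "web"},
		"nginx", "nginx:1.14",
		appsv1.RollingUpdateDeploymentStrategyType)
}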

@ -17,16 +17,16 @@ limitations under the License.
package deployment
import (
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
func logReplicaSetsOfDeployment(deployment *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, e2elog.Logf)
}
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
func logPodsOfDeployment(c clientset.Interface, deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, e2elog.Logf)
}


@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -42,7 +42,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
}
// WaitForDeploymentWithCondition waits for the specified deployment condition.
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, e2elog.Logf, poll, pollLongTimeout)
}
@ -56,13 +56,13 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
// WaitForDeploymentComplete waits for the deployment to complete, and doesn't check whether the rolling update strategy is broken.
// The rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or when the deployment has just been created.
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
func WaitForDeploymentComplete(c clientset.Interface, d *appsv1.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, e2elog.Logf, poll, pollLongTimeout)
}
// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and checks that the rolling update strategy isn't broken at any time.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, e2elog.Logf, poll, pollLongTimeout)
}
@ -79,8 +79,8 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
// WaitForDeploymentOldRSsNum waits for the deployment to clean up old ReplicaSets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment
var oldRSs []*appsv1.ReplicaSet
var d *appsv1.Deployment
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
@ -103,7 +103,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
}
// WaitForDeploymentRevision waits for a deployment to reach the target revision.
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
func WaitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {

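A similar sketch for the wait helpers; the wrapper name and reason argument are hypothetical, while appsv1.DeploymentProgressing is the condition type WaitForDeploymentWithCondition accepts after this rename:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// waitForProgressing blocks until the named Deployment reports a Progressing
// condition with the given reason.
func waitForProgressing(c clientset.Interface, ns, name, reason string) error {
	return e2edeploy.WaitForDeploymentWithCondition(c, ns, name, reason,
		appsv1.DeploymentProgressing)
}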

@ -37,7 +37,7 @@ import (
compute "google.golang.org/api/compute/v1"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -908,12 +908,12 @@ func generateBacksideHTTPSServiceSpec() *v1.Service {
}
}
func generateBacksideHTTPSDeploymentSpec() *apps.Deployment {
return &apps.Deployment{
func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "echoheaders-https",
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
"app": "echoheaders-https",
}},
@ -941,7 +941,7 @@ func generateBacksideHTTPSDeploymentSpec() *apps.Deployment {
}
// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
if err != nil {
return nil, nil, nil, err
@ -965,7 +965,7 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st
}
// DeleteTestResource deletes given deployment, service and ingress.
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error {
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error {
var errs []error
if ing != nil {
if err := j.runDelete(ing); err != nil {


@ -19,7 +19,7 @@ package pod
import (
"fmt"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
@ -61,15 +61,15 @@ func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.Deployment:
case *appsv1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
@ -92,7 +92,7 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
@ -102,14 +102,14 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.Deployment:
case *appsv1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.DaemonSet:
return 0, nil
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return 0, nil
case *batch.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods


@ -17,14 +17,14 @@ limitations under the License.
package replicaset
import (
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewReplicaSet returns a new ReplicaSet.
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
return &apps.ReplicaSet{
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet {
return &appsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "apps/v1",
@ -33,7 +33,7 @@ func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]
Namespace: namespace,
Name: name,
},
Spec: apps.ReplicaSetSpec{
Spec: appsv1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},


@ -19,7 +19,7 @@ package replicaset
import (
"fmt"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -29,7 +29,7 @@ import (
)
// UpdateReplicaSetWithRetries updates the replicaset template with retries.
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*appsv1.ReplicaSet, error) {
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, framework.Poll, framework.PollShortTimeout)
}

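A short sketch of the retrying updater, mirroring the upgrade-test call sites later in this commit; the wrapper name and import path are assumed:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// scaleReplicaSet scales a ReplicaSet through UpdateReplicaSetWithRetries;
// note that the mutation closure now takes *appsv1.ReplicaSet.
func scaleReplicaSet(c clientset.Interface, ns, name string, replicas int32) error {
	_, err := replicaset.UpdateReplicaSetWithRetries(c, ns, name,
		func(rs *appsv1.ReplicaSet) { *rs.Spec.Replicas = replicas })
	return err
}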

@ -20,7 +20,7 @@ import (
"fmt"
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -53,7 +53,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
}
// WaitForReplicaSetDesiredReplicas waits until the replicaset has the desired number of replicas.
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
@ -69,7 +69,7 @@ func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
}
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
@ -85,7 +85,7 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps
}
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})

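And a sketch for the ReplicaSet status waiters; the wrapper is hypothetical, the helper signature is the one shown above:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// waitForAvailable waits until rs reports n available replicas in its status.
func waitForAvailable(c clientset.Interface, rs *appsv1.ReplicaSet, n int32) error {
	return replicaset.WaitForReplicaSetTargetAvailableReplicas(c, rs, n)
}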

@ -26,7 +26,7 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
appsV1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -82,7 +82,7 @@ func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
}
// GetStatefulSet gets the StatefulSet named name in namespace.
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.StatefulSet {
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1.StatefulSet {
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
@ -91,7 +91,7 @@ func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.Statefu
}
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.StatefulSet {
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1.StatefulSet {
mkpath := func(file string) string {
return filepath.Join(manifestPath, file)
}
@ -115,7 +115,7 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
}
// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) error {
func (s *StatefulSetTester) CheckMount(ss *appsv1.StatefulSet, mountPath string) error {
for _, cmd := range []string{
// Print inode, size etc
fmt.Sprintf("ls -idlh %v", mountPath),
@ -132,7 +132,7 @@ func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) e
}
// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any subsequent Pods.
func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string) error {
func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1.StatefulSet, cmd string) error {
podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items {
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
@ -145,7 +145,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string)
}
// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil then verification failed.
func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) CheckHostname(ss *appsv1.StatefulSet) error {
cmd := "printf $(hostname)"
podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items {
@ -161,7 +161,7 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
}
// Saturate waits for all Pods in ss to become Running and Ready.
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
func (s *StatefulSetTester) Saturate(ss *appsv1.StatefulSet) {
var i int32
for i = 0; i < *(ss.Spec.Replicas); i++ {
e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
@ -172,7 +172,7 @@ func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
}
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) {
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1.StatefulSet) {
name := getStatefulSetPodNameAtIndex(index, ss)
noGrace := int64(0)
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
@ -184,26 +184,26 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.Statefu
type VerifyStatefulPodFunc func(*v1.Pod)
// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) {
name := getStatefulSetPodNameAtIndex(index, ss)
pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
ExpectNoError(err, fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
verify(pod)
}
func getStatefulSetPodNameAtIndex(index int, ss *apps.StatefulSet) string {
func getStatefulSetPodNameAtIndex(index int, ss *appsv1.StatefulSet) string {
// TODO: we won't use "-index" as the name strategy forever,
// pull the name out from an identity mapper.
return fmt.Sprintf("%v-%v", ss.Name, index)
}
// Scale scales ss to count replicas.
func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.StatefulSet, error) {
func (s *StatefulSetTester) Scale(ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) {
name := ss.Name
ns := ss.Namespace
e2elog.Logf("Scaling statefulset %s to %d", name, count)
ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
ss = s.update(ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
var statefulPodList *v1.PodList
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
@ -227,12 +227,12 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.Stat
}
// UpdateReplicas updates the replicas of ss to count.
func (s *StatefulSetTester) UpdateReplicas(ss *apps.StatefulSet, count int32) {
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
func (s *StatefulSetTester) UpdateReplicas(ss *appsv1.StatefulSet, count int32) {
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
}
// Restart scales ss to 0 and then back to its previous number of replicas.
func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
func (s *StatefulSetTester) Restart(ss *appsv1.StatefulSet) {
oldReplicas := *(ss.Spec.Replicas)
ss, err := s.Scale(ss, 0)
ExpectNoError(err)
@ -240,10 +240,10 @@ func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
// This way we know the controller has observed all Pod deletions
// before we scale it back up.
s.WaitForStatusReplicas(ss, 0)
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
}
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet {
func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
for i := 0; i < 3; i++ {
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
@ -263,7 +263,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.Statefu
}
// GetPodList gets the current Pods in ss.
func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
func (s *StatefulSetTester) GetPodList(ss *appsv1.StatefulSet) *v1.PodList {
selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
ExpectNoError(err)
podList, err := s.c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
@ -273,7 +273,7 @@ func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
// to scale to count.
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration, hard bool) {
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
start := time.Now()
deadline := start.Add(timeout)
for t := time.Now(); t.Before(deadline); t = time.Now() {
@ -296,7 +296,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
// numPodsReady ordinals to be Ready.
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *apps.StatefulSet) {
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
podList := s.GetPodList(ss)
@ -325,7 +325,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
}
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
func (s *StatefulSetTester) WaitForState(ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
@ -342,8 +342,8 @@ func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus
func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulSet {
s.WaitForState(set, func(set2 *apps.StatefulSet, pods *v1.PodList) (bool, error) {
func (s *StatefulSetTester) WaitForStatus(set *appsv1.StatefulSet) *appsv1.StatefulSet {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
@ -354,14 +354,14 @@ func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulS
}
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) {
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
}
// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
func (s *StatefulSetTester) WaitForPodReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPodReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@ -376,9 +376,9 @@ func (s *StatefulSetTester) WaitForPodReady(set *apps.StatefulSet, podName strin
}
// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func (s *StatefulSetTester) WaitForPodNotReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@ -394,15 +394,15 @@ func (s *StatefulSetTester) WaitForPodNotReady(set *apps.StatefulSet, podName st
// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
// complete. set must have a RollingUpdateStatefulSetStrategyType.
func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
set.Namespace,
set.Name,
set.Spec.UpdateStrategy.Type)
}
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) {
@ -415,12 +415,12 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
)
s.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
}
}
return false, nil
@ -434,9 +434,9 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision.
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
set.Namespace,
set.Name,
@ -447,7 +447,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
set.Namespace,
set.Name)
}
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@ -461,23 +461,23 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
)
s.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
}
}
return false, nil
}
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
return false, nil
}
}
@ -487,7 +487,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
}
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) {
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
s.WaitForRunning(numStatefulPods, 0, ss)
}
@ -506,12 +506,12 @@ var httpProbe = &v1.Probe{
// SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
// Note that this cannot be used together with PauseNewPods().
func (s *StatefulSetTester) SetHTTPProbe(ss *apps.StatefulSet) {
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
}
// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) BreakHTTPProbe(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -522,7 +522,7 @@ func (s *StatefulSetTester) BreakHTTPProbe(ss *apps.StatefulSet) error {
}
// BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -535,7 +535,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod)
}
// RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) RestoreHTTPProbe(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -546,7 +546,7 @@ func (s *StatefulSetTester) RestoreHTTPProbe(ss *apps.StatefulSet) error {
}
// RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -576,7 +576,7 @@ func hasPauseProbe(pod *v1.Pod) bool {
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHTTPProbe().
func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
func (s *StatefulSetTester) PauseNewPods(ss *appsv1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}
@ -585,7 +585,7 @@ func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
// It fails the test if it finds any pods that are not in phase Running,
// or if it finds more than one paused Pod existing at the same time.
// This is a no-op if there are no paused pods.
func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
func (s *StatefulSetTester) ResumeNextPod(ss *appsv1.StatefulSet) {
podList := s.GetPodList(ss)
resumedPod := ""
for _, pod := range podList.Items {
@ -606,7 +606,7 @@ func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
}
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
@ -631,7 +631,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
}
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
@ -656,7 +656,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
}
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
func (s *StatefulSetTester) CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
if expectedServiceName != ss.Spec.ServiceName {
@ -767,7 +767,7 @@ func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// statefulPodMounts are the mounts that will be backed by PVs. podMounts are the mounts that are mounted directly
// to the Pod. labels are the labels that will be used for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
mounts := append(statefulPodMounts, podMounts...)
claims := []v1.PersistentVolumeClaim{}
for _, m := range statefulPodMounts {
@ -786,7 +786,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
})
}
return &apps.StatefulSet{
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
@ -795,7 +795,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Name: name,
Namespace: ns,
},
Spec: apps.StatefulSetSpec{
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
@ -817,7 +817,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Volumes: vols,
},
},
UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: appsv1.RollingUpdateStatefulSetStrategyType},
VolumeClaimTemplates: claims,
ServiceName: governingSvcName,
},
@ -825,7 +825,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
}
// NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
func NewStatefulSetScale(ss *apps.StatefulSet) *appsV1beta2.Scale {
func NewStatefulSetScale(ss *appsv1.StatefulSet) *appsV1beta2.Scale {
return &appsV1beta2.Scale{
// TODO: Create a variant of ObjectMeta type that only contains the fields below.
ObjectMeta: metav1.ObjectMeta{
@ -869,10 +869,10 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}
type updateStatefulSetFunc func(*apps.StatefulSet)
type updateStatefulSetFunc func(*appsv1.StatefulSet)
// UpdateStatefulSetWithRetries updates the statefulset template with retries.
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) {
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {

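A sketch tying the StatefulSetTester helpers together; the function name and replica count are illustrative assumptions, while the tester methods are the ones renamed above:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// scaleAndWait scales a StatefulSet through the tester, then waits for its
// status to observe the new generation; every callback in the chain now
// traffics in *appsv1.StatefulSet.
func scaleAndWait(c clientset.Interface, ss *appsv1.StatefulSet, n int32) (*appsv1.StatefulSet, error) {
	tester := framework.NewStatefulSetTester(c)
	ss, err := tester.Scale(ss, n)
	if err != nil {
		return nil, err
	}
	return tester.WaitForStatus(ss), nil
}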

@ -47,7 +47,7 @@ import (
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
@ -2249,15 +2249,15 @@ func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.Deployment:
case *appsv1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
@ -2278,7 +2278,7 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
@ -2288,14 +2288,14 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.Deployment:
case *appsv1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.DaemonSet:
return 0, nil
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return 0, nil
case *batch.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
@ -2379,11 +2379,11 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
return nil
}
type updateDSFunc func(*apps.DaemonSet)
type updateDSFunc func(*appsv1.DaemonSet)
// UpdateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
// until it succeeds or a timeout expires.
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
@ -3432,8 +3432,8 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
}
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
func DsFromManifest(url string) (*apps.DaemonSet, error) {
var controller apps.DaemonSet
func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
var controller appsv1.DaemonSet
e2elog.Logf("Parsing ds from %v", url)
var response *http.Response


@ -20,7 +20,7 @@ import (
"fmt"
"io/ioutil"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
rbac "k8s.io/api/rbac/v1"
@ -119,8 +119,8 @@ func IngressToManifest(ing *networkingv1beta1.Ingress, path string) error {
}
// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
var ss apps.StatefulSet
func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
var ss appsv1.StatefulSet
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err
@ -143,8 +143,8 @@ func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
}
// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) {
var ds apps.DaemonSet
func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err

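A sketch of the manifest loaders; the e2e manifest package path, the wrapper name, and the caller-supplied file path are assumptions for illustration:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/kubernetes/test/e2e/manifest"
)

// loadStatefulSet decodes a StatefulSet manifest into namespace ns; the
// returned object is now typed *appsv1.StatefulSet.
func loadStatefulSet(path, ns string) (*appsv1.StatefulSet, error) {
	return manifest.StatefulSetFromManifest(path, ns)
}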

@ -22,7 +22,7 @@ import (
"sync"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -73,7 +73,7 @@ type IngressScaleFramework struct {
NumIngressesTest []int
OutputFile string
ScaleTestDeploy *apps.Deployment
ScaleTestDeploy *appsv1.Deployment
ScaleTestSvcs []*v1.Service
ScaleTestIngs []*networkingv1beta1.Ingress
@ -438,12 +438,12 @@ func generateScaleTestServiceSpec(suffix string) *v1.Service {
}
}
func generateScaleTestBackendDeploymentSpec(numReplicas int32) *apps.Deployment {
return &apps.Deployment{
func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: scaleTestBackendName,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &numReplicas,
Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
Template: v1.PodTemplateSpec{


@ -21,7 +21,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
})
})
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)


@ -26,7 +26,7 @@ import (
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -144,7 +144,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
})
// Wait until the pod fails over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) {
var (
err error
newNode string
@ -179,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *apps.Deploymen
}
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err


@ -19,7 +19,7 @@ package upgrades
import (
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -33,7 +33,7 @@ import (
// DaemonSetUpgradeTest tests that a DaemonSet is running before and after
// a cluster upgrade.
type DaemonSetUpgradeTest struct {
daemonSet *apps.DaemonSet
daemonSet *appsv1.DaemonSet
}
// Name returns the tracking name of the test.
@ -47,12 +47,12 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace
t.daemonSet = &apps.DaemonSet{
t.daemonSet = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Name: daemonSetName,
},
Spec: apps.DaemonSetSpec{
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labelSet,
},


@ -19,7 +19,7 @@ package upgrades
import (
"fmt"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -60,7 +60,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
rsClient := c.AppsV1().ReplicaSets(ns)
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
@ -83,7 +83,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Trigger a new rollout so that we have some history.
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
@ -159,7 +159,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the upgraded deployment is active by scaling up the deployment by 1
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)


@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
@ -89,7 +89,7 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)


@ -19,7 +19,7 @@ package upgrades
import (
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/version"
@ -31,7 +31,7 @@ import (
type StatefulSetUpgradeTest struct {
tester *framework.StatefulSetTester
service *v1.Service
set *apps.StatefulSet
set *appsv1.StatefulSet
}
// Name returns the tracking name of the test.


@ -20,7 +20,7 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -221,7 +221,7 @@ func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) {
return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
}
func getKubeProxyDaemonSet(c clientset.Interface) (*apps.DaemonSetList, error) {
func getKubeProxyDaemonSet(c clientset.Interface) (*appsv1.DaemonSetList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(listOpts)