Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
Merge pull request #46425 from wanghaoran1988/move_workload_e2e_to_own_package
Automatic merge from submit-queue.

Move the workload e2e tests to its own package.

**What this PR does / why we need it**: Move the workload e2e tests to its own package.

**Release note**:
```
None
```
This commit is contained in: commit 49b83c6cb4
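For orientation, here is a minimal sketch of what a spec in the new test/e2e/workload package could look like after this move. It is an illustrative assumption (the file layout, spec text, service name, and labels are not taken from this diff); the point is that workload tests keep using the shared helpers that now live in test/e2e/framework, while the package itself is wired into the suite by the BUILD and blank-import changes shown below.

```go
// Hypothetical example only: the spec name, service name, and labels are assumptions.
package workload

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Workload example", func() {
	f := framework.NewDefaultFramework("workload-example")

	It("creates a headless service through the shared framework helper", func() {
		// CreateServiceSpec is one of the helpers this PR promotes into test/e2e/framework.
		svc := framework.CreateServiceSpec("example-svc", "", true, map[string]string{"app": "example"})
		_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc)
		Expect(err).NotTo(HaveOccurred())
	})
})
```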
@@ -49,6 +49,7 @@ package_group(
packages = [
"//test/e2e",
"//test/e2e/framework",
"//test/e2e/workload",
"//test/integration/etcd",
"//test/integration/framework",
"//test/integration/kubectl",
@@ -28,6 +28,7 @@ go_test(
"//test/e2e/perf:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/e2e/storage:go_default_library",
"//test/e2e/workload:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@@ -50,12 +51,8 @@ go_library(
"cadvisor.go",
"certificates.go",
"cluster_upgrade.go",
"cronjob.go",
"custom_resource_definition.go",
"daemon_restart.go",
"daemon_set.go",
"dashboard.go",
"deployment.go",
"disruption.go",
"dns.go",
"dns_common.go",
@@ -74,7 +71,6 @@ go_library(
"gke_node_pools.go",
"ha_master.go",
"ingress.go",
"job.go",
"kibana_logging.go",
"kube_proxy.go",
"kubectl.go",
@@ -97,9 +93,7 @@ go_library(
"portforward.go",
"pre_stop.go",
"proxy.go",
"rc.go",
"reboot.go",
"replica_set.go",
"resize_nodes.go",
"resource_quota.go",
"restart.go",
@@ -110,7 +104,6 @@ go_library(
"serviceloadbalancers.go",
"ssh.go",
"stackdriver_monitoring.go",
"statefulset.go",
"third-party.go",
"ubernetes_lite.go",
"util_iperf.go",
@@ -120,25 +113,17 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/networking:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/node:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
@@ -146,12 +131,10 @@ go_library(
"//pkg/master/ports:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
@@ -161,11 +144,11 @@ go_library(
"//test/e2e/perf:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e/workload:go_default_library",
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/elazarl/goproxy:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
@@ -179,7 +162,6 @@ go_library(
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
@@ -191,7 +173,6 @@ go_library(
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -254,6 +235,7 @@ filegroup(
"//test/e2e/storage:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
"//test/e2e/upgrades:all-srcs",
"//test/e2e/workload:all-srcs",
],
tags = ["automanaged"],
)
@@ -280,29 +280,6 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}

func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{
Selector: selector,
},
}
if externalName != "" {
headlessService.Spec.Type = v1.ServiceTypeExternalName
headlessService.Spec.ExternalName = externalName
} else {
headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
}
if isHeadless {
headlessService.Spec.ClusterIP = "None"
}
return headlessService
}

func reverseArray(arr []string) []string {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
@@ -346,7 +323,7 @@ var _ = framework.KubeDescribe("DNS", func() {
testServiceSelector := map[string]string{
"dns-test": "true",
}
headlessService := createServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
@@ -355,7 +332,7 @@ var _ = framework.KubeDescribe("DNS", func() {
f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()

regularService := createServiceSpec("test-service-2", "", false, testServiceSelector)
regularService := framework.CreateServiceSpec("test-service-2", "", false, testServiceSelector)
regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred())
defer func() {
@@ -396,7 +373,7 @@ var _ = framework.KubeDescribe("DNS", func() {
}
serviceName := "dns-test-service-2"
podHostname := "dns-querier-2"
headlessService := createServiceSpec(serviceName, "", true, testServiceSelector)
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
@@ -427,7 +404,7 @@ var _ = framework.KubeDescribe("DNS", func() {
// Create a test ExternalName service.
By("Creating a test externalName service")
serviceName := "dns-test-service-3"
externalNameService := createServiceSpec(serviceName, "foo.example.com", false, nil)
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService)
Expect(err).NotTo(HaveOccurred())
defer func() {
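The local createServiceSpec helper deleted from dns.go above is re-exported from the framework package later in this diff (see CreateServiceSpec added to test/e2e/framework/util.go). As a hedged sketch of how any suite can now build the same specs — the service names mirror the ones used by the DNS tests, everything else (package placement, function name) is an assumption:

```go
// Illustrative sketch, not part of this PR.
package workload

import (
	"k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

func exampleServiceSpecs() (headless, external *v1.Service) {
	// Headless, selector-backed service (ClusterIP is set to "None").
	headless = framework.CreateServiceSpec("dns-test-service", "", true, map[string]string{"dns-test": "true"})
	// ExternalName service pointing at an outside DNS name; no ports are added.
	external = framework.CreateServiceSpec("dns-test-service-3", "foo.example.com", false, nil)
	return headless, external
}
```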
@@ -27,6 +27,7 @@ import (
_ "k8s.io/kubernetes/test/e2e/perf"
_ "k8s.io/kubernetes/test/e2e/scheduling"
_ "k8s.io/kubernetes/test/e2e/storage"
_ "k8s.io/kubernetes/test/e2e/workload"
)

func init() {
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/workload"
testutils "k8s.io/kubernetes/test/utils"

. "github.com/onsi/ginkgo"
@@ -73,7 +74,7 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {

checkExistingRCRecovers(f)

testReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
workload.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}

// For this duration, etcd will be failed by executing a failCommand on the master.
@@ -12,6 +12,7 @@ go_library(
srcs = [
"authorizer_util.go",
"cleanup.go",
"deployment_util.go",
"exec_util.go",
"federation_util.go",
"firewall_util.go",
@@ -28,7 +29,9 @@ go_library(
"perf_util.go",
"pods.go",
"pv_util.go",
"rc_util.go",
"resource_usage_gatherer.go",
"rs_util.go",
"service_util.go",
"size.go",
"statefulset_utils.go",
test/e2e/framework/deployment_util.go (new file, 314 lines)
@@ -0,0 +1,314 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"fmt"
"time"

. "github.com/onsi/ginkgo"

"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
testutils "k8s.io/kubernetes/test/utils"
)

type updateDeploymentFunc func(d *extensions.Deployment)

func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
Logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}

// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment

pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment

_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}

func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}

func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, Poll, 1*time.Minute)
}

func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
var deployment *extensions.Deployment
pollErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
d, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
deployment = d
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
return cond != nil && cond.Reason == reason, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
if err == nil {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
}
}
return pollErr
}

// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
// may result in taking longer to relabel an RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}

func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: extensions.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}

// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
return testutils.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
}

// Waits for the deployment to reach desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
)

err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
}

// When the deployment status and its underlying resources reach the desired state, we're done
return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
})

if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}

// WaitForDeploymentUpdatedReplicasLTE waits for a given deployment to be observed by the controller and to have at least minUpdatedReplicas updatedReplicas.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}

// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}

// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}

w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}

status := d.Status

condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
status = d.Status

if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c)
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c)
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}

return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}

_, err = watch.Until(2*time.Minute, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}

func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}

func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}

func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
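The new deployment_util.go gathers the deployment helpers the moved tests rely on. As a hedged usage sketch (the function and package names below are assumptions, not code from this PR), a test can combine the retrying update helper with the status wait:

```go
// Illustrative sketch, not part of this PR.
package workload

import (
	extensions "k8s.io/api/extensions/v1beta1"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

func bumpImageAndWait(c clientset.Interface, ns, name, image string) error {
	// The callback runs against a freshly fetched Deployment on every retry.
	d, err := framework.UpdateDeploymentWithRetries(c, ns, name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = image
	})
	if err != nil {
		return err
	}
	// Blocks until max surge / max unavailable are respected and the rollout completes.
	return framework.WaitForDeploymentStatus(c, d)
}
```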
@@ -229,3 +229,27 @@ func newBool(val bool) *bool {
*p = val
return p
}

type updateJobFunc func(*batch.Job)

func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.Batch().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(job)
if job, err = jobs.Update(job); err == nil {
Logf("Updating job %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to job %q: %v", name, updateErr)
}
return job, pollErr
}
test/e2e/framework/rc_util.go (new file, 286 lines)
@@ -0,0 +1,286 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"fmt"
"strings"
"time"

. "github.com/onsi/ginkgo"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
testutils "k8s.io/kubernetes/test/utils"
)

// RcByNamePort returns a ReplicationController with specified name and port
func RcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
labels map[string]string, gracePeriod *int64) *v1.ReplicationController {

return RcByNameContainer(name, replicas, image, labels, v1.Container{
Name: name,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
}, gracePeriod)
}

// RcByNameContainer returns a ReplicationController with specified name and container
func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
gracePeriod *int64) *v1.ReplicationController {

zeroGracePeriod := int64(0)

// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
gracePeriod = &zeroGracePeriod
}
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{c},
TerminationGracePeriodSeconds: gracePeriod,
},
},
},
}
}

// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if replicas == 0 {
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
if err != nil {
return err
}
defer ps.Stop()
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
} else {
if err := testutils.WaitForPodsWithLabelRunning(
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}

type updateRcFunc func(d *v1.ReplicationController)

func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
var rc *v1.ReplicationController
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil {
Logf("Updating replication controller %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to rc %q: %v", name, updateErr)
}
return rc, pollErr
}

// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
}

func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
}

func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
}

func RunRC(config testutils.RCConfig) error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunRC(config)
}

// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}

// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}

// WaitForReplicationControllerwithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false)
func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
rcs, err := c.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch {
case len(rcs.Items) != 0:
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
return exist, nil
case len(rcs.Items) == 0:
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil
default:
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}

// validatorFn is the function which individual tests will implement.
// we may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error

// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
// we care about. Exists will never return an error and it's safe to check a chain of
// things, any one of which may not exist. In the below template, all of info,
// containername, and running might be nil, so the normal index function isn't very
// helpful.
// This template is unit-tested in kubectl, so if you change it, update the unit test.
// You can read about the syntax here: http://golang.org/pkg/text/template/.
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)

getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)

By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
continue waitLoop
}

currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}

// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}

Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
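rc_util.go carries the ReplicationController helpers out of the old monolithic util.go. A hedged sketch of typical use follows; the RC name, port, replica count, and timeouts are assumptions, while ServeHostnameImage and the helper signatures come from this diff.

```go
// Illustrative sketch, not part of this PR.
package workload

import (
	"time"

	"k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

func createHostnameRC(c clientset.Interface, ns string) (*v1.ReplicationController, error) {
	// RcByNamePort fills in the selector, the "name" label, and a zero grace period.
	rc := framework.RcByNamePort("serve-hostname", 2, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, nil)
	rc, err := c.Core().ReplicationControllers(ns).Create(rc)
	if err != nil {
		return nil, err
	}
	// Poll until the controller is visible through the API (exist == true).
	return rc, framework.WaitForReplicationController(c, ns, rc.Name, true, 2*time.Second, 1*time.Minute)
}
```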
test/e2e/framework/rs_util.go (new file, 150 lines)
@@ -0,0 +1,150 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"fmt"
"time"

. "github.com/onsi/ginkgo"

extensions "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
testutils "k8s.io/kubernetes/test/utils"
)

type updateRsFunc func(d *extensions.ReplicaSet)

func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRsFunc) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil {
Logf("Updating replica set %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to replicaset %q: %v", name, updateErr)
}
return rs, pollErr
}

// CheckNewRSAnnotations checks if the new RS's annotations are as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}

// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Now().Sub(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(clientset, rc)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}

// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
options := metav1.ListOptions{LabelSelector: selector.String()}
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}

// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became ready", name)
}
return err
}

func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunReplicaSet(config)
}
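rs_util.go does the same for ReplicaSets. A hedged sketch (the function and package names are assumptions) of scaling through the retrying helper and then waiting for readiness:

```go
// Illustrative sketch, not part of this PR.
package workload

import (
	extensions "k8s.io/api/extensions/v1beta1"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

func scaleReplicaSet(c clientset.Interface, ns, name string, replicas int32) error {
	if _, err := framework.UpdateReplicaSetWithRetries(c, ns, name, func(rs *extensions.ReplicaSet) {
		rs.Spec.Replicas = &replicas
	}); err != nil {
		return err
	}
	// WaitForReadyReplicaSet compares spec.replicas against status.readyReplicas.
	return framework.WaitForReadyReplicaSet(c, ns, name)
}
```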
@@ -1326,3 +1326,26 @@ func DescribeSvc(ns string) {
"describe", "svc", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}

func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{
Selector: selector,
},
}
if externalName != "" {
headlessService.Spec.Type = v1.ServiceTypeExternalName
headlessService.Spec.ExternalName = externalName
} else {
headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
}
if isHeadless {
headlessService.Spec.ClusterIP = "None"
}
return headlessService
}
@@ -808,3 +808,27 @@ func (sp statefulPodsByOrdinal) Swap(i, j int) {
func (sp statefulPodsByOrdinal) Less(i, j int) bool {
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}

type updateStatefulSetFunc func(*apps.StatefulSet)

func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.Apps().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
Logf("Updating stateful set %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to stateful set %q: %v", name, updateErr)
}
return statefulSet, pollErr
}
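The StatefulSet helper added above follows the same get-mutate-update retry pattern as the Deployment, Job, RC, and ReplicaSet variants. A hedged sketch of calling it; the label key/value, the function name, and the apps/v1beta1 alias are assumptions:

```go
// Illustrative sketch, not part of this PR.
package workload

import (
	apps "k8s.io/api/apps/v1beta1"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

func relabelStatefulSet(c clientset.Interface, ns, name, key, value string) (*apps.StatefulSet, error) {
	return framework.UpdateStatefulSetWithRetries(c, ns, name, func(ss *apps.StatefulSet) {
		// The callback sees a freshly fetched object on every retry, so conflicts are retried away.
		if ss.Labels == nil {
			ss.Labels = map[string]string{}
		}
		ss.Labels[key] = value
	})
}
```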
@ -51,6 +51,9 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
gomegatypes "github.com/onsi/gomega/types"
|
||||
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@ -63,17 +66,12 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/dynamic"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
@ -86,14 +84,12 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
nodectlr "k8s.io/kubernetes/pkg/controller/node"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
sshutil "k8s.io/kubernetes/pkg/ssh"
|
||||
uexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
@ -1450,16 +1446,6 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
|
||||
// In case of failure or too long waiting time, an error is returned.
|
||||
func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
|
||||
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
|
||||
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
|
||||
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
|
||||
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
|
||||
}
|
||||
|
||||
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
|
||||
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
@ -1533,49 +1519,6 @@ func countEndpointsNum(e *v1.Endpoints) int {
|
||||
return num
|
||||
}
|
||||
|
||||
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
|
||||
func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
_, err := c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
|
||||
return !exist, nil
|
||||
} else {
|
||||
Logf("ReplicationController %s in namespace %s found.", name, namespace)
|
||||
return exist, nil
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
|
||||
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForReplicationControllerwithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false)
|
||||
func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
|
||||
timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
rcs, err := c.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
switch {
|
||||
case len(rcs.Items) != 0:
|
||||
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
|
||||
return exist, nil
|
||||
case len(rcs.Items) == 0:
|
||||
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
|
||||
return !exist, nil
|
||||
default:
|
||||
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
|
||||
return false, nil
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
|
||||
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
|
||||
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
|
||||
endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
|
||||
@ -1976,72 +1919,6 @@ func AssertCleanup(ns string, selectors ...string) {
|
||||
}
|
||||
}
|
||||
|
||||
// validatorFn is the function which is individual tests will implement.
|
||||
// we may want it to return more than just an error, at some point.
|
||||
type validatorFn func(c clientset.Interface, podID string) error
|
||||
|
||||
// ValidateController is a generic mechanism for testing RC's that are running.
|
||||
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
|
||||
// "containername": this is grepped for.
|
||||
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
|
||||
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
|
||||
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
|
||||
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
|
||||
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
|
||||
// NB: kubectl adds the "exists" function to the standard template functions.
|
||||
// This lets us check to see if the "running" entry exists for each of the containers
|
||||
// we care about. Exists will never return an error and it's safe to check a chain of
|
||||
// things, any one of which may not exist. In the below template, all of info,
|
||||
// containername, and running might be nil, so the normal index function isn't very
|
||||
// helpful.
|
||||
// This template is unit-tested in kubectl, so if you change it, update the unit test.
|
||||
// You can read about the syntax here: http://golang.org/pkg/text/template/.
|
||||
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
|
||||
|
||||
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
|
||||
|
||||
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
|
||||
waitLoop:
|
||||
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
|
||||
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
|
||||
pods := strings.Fields(getPodsOutput)
|
||||
if numPods := len(pods); numPods != replicas {
|
||||
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
|
||||
continue
|
||||
}
|
||||
var runningPods []string
|
||||
for _, podID := range pods {
|
||||
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
|
||||
if running != "true" {
|
||||
Logf("%s is created but not running", podID)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
|
||||
if currentImage != containerImage {
|
||||
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
// Call the generic validator function here.
|
||||
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
|
||||
if err := validator(c, podID); err != nil {
|
||||
Logf("%s is running right image but validator function failed: %v", podID, err)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
Logf("%s is verified up and running", podID)
|
||||
runningPods = append(runningPods, podID)
|
||||
}
|
||||
// If we reach here, then all our checks passed.
|
||||
if len(runningPods) == replicas {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
|
||||
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
|
||||
}
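// Example (illustrative sketch, not part of this change): plugging a custom validator
// into ValidateController. The image, container name, test label and namespace are
// placeholders; the validator only checks that the pod has been assigned an IP.
func exampleValidateController(c clientset.Interface, ns string) {
	hasIP := func(c clientset.Interface, podID string) error {
		pod, err := c.Core().Pods(ns).Get(podID, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if pod.Status.PodIP == "" {
			return fmt.Errorf("pod %s has no IP assigned yet", podID)
		}
		return nil
	}
	ValidateController(c, "gcr.io/google_containers/serve_hostname:v1.4", 2, "example-container", "example-test", hasIP, ns)
}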
|
||||
|
||||
// KubectlCmd runs the kubectl executable through the wrapper script.
|
||||
func KubectlCmd(args ...string) *exec.Cmd {
|
||||
defaultArgs := []string{}
|
||||
@ -2290,27 +2167,6 @@ func (f *Framework) MatchContainerOutput(
|
||||
return nil
|
||||
}
|
||||
|
||||
func RunDeployment(config testutil.DeploymentConfig) error {
|
||||
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutil.RunDeployment(config)
|
||||
}
|
||||
|
||||
func RunReplicaSet(config testutil.ReplicaSetConfig) error {
|
||||
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutil.RunReplicaSet(config)
|
||||
}
|
||||
|
||||
func RunRC(config testutil.RCConfig) error {
|
||||
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutil.RunRC(config)
|
||||
}
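// Example (illustrative sketch, not part of this change): running a small RC through
// the RunRC wrapper so node and container debug info gets dumped on failure. The
// RCConfig field names (Client, Name, Namespace, Image, Replicas, Timeout) are assumed
// from test/utils; the name and image are placeholders.
func exampleRunRC(c clientset.Interface, ns string) error {
	return RunRC(testutil.RCConfig{
		Client:    c,
		Name:      "example-rc",
		Namespace: ns,
		Image:     "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:  3,
		Timeout:   5 * time.Minute,
	})
}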
|
||||
|
||||
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
|
||||
|
||||
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
||||
@ -2736,14 +2592,6 @@ func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind s
|
||||
return nil
|
||||
}
|
||||
|
||||
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
|
||||
}
|
||||
|
||||
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
|
||||
}
|
||||
|
||||
// Returns true if all the specified pods are scheduled, else returns false.
|
||||
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
|
||||
PodStore := testutil.NewPodStore(c, ns, label, fields.Everything())
|
||||
@ -2959,10 +2807,6 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
|
||||
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
|
||||
By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
|
||||
@ -3035,11 +2879,6 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
|
||||
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
|
||||
// It waits until the reflector does a List() before returning.
|
||||
func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
|
||||
@ -3079,254 +2918,6 @@ func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) err
|
||||
})
|
||||
}
|
||||
|
||||
// Delete a ReplicaSet and all pods it spawned
|
||||
func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
|
||||
rc, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
Logf("ReplicaSet %s was already deleted: %v", name, err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
Logf("ReplicaSet %s was already deleted: %v", name, err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
startTime := time.Now()
|
||||
err = reaper.Stop(ns, name, 0, nil)
|
||||
if apierrs.IsNotFound(err) {
|
||||
Logf("ReplicaSet %s was already deleted: %v", name, err)
|
||||
return nil
|
||||
}
|
||||
deleteRSTime := time.Now().Sub(startTime)
|
||||
Logf("Deleting RS %s took: %v", name, deleteRSTime)
|
||||
if err == nil {
|
||||
err = waitForReplicaSetPodsGone(clientset, rc)
|
||||
}
|
||||
terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
|
||||
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
|
||||
return err
|
||||
}
|
||||
|
||||
// waitForReplicaSetPodsGone waits until there are no pods reported under a
|
||||
// ReplicaSet selector (because the pods have completed termination).
|
||||
func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
|
||||
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
ExpectNoError(err)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
|
||||
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("replica set %q never became ready", name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
|
||||
zero := int64(0)
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: strategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: imageName,
|
||||
Image: image,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
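// Example (illustrative sketch, not part of this change): creating a deployment from
// the NewDeployment helper and waiting until its status is valid. The name, labels and
// image are placeholders.
func exampleRollout(c clientset.Interface, ns string) error {
	podLabels := map[string]string{"name": "example"}
	d := NewDeployment("example-deployment", 2, podLabels, "nginx", "gcr.io/google_containers/nginx-slim:0.8", extensions.RollingUpdateDeploymentStrategyType)
	d, err := c.Extensions().Deployments(ns).Create(d)
	if err != nil {
		return err
	}
	return WaitForDeploymentStatusValid(c, d)
}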
|
||||
|
||||
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
|
||||
// Note that the status should stay valid at all times except shortly after a scaling event or right after the deployment is created.
|
||||
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
|
||||
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
|
||||
return testutil.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// Waits for the deployment to reach desired state.
|
||||
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
|
||||
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
|
||||
var (
|
||||
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
|
||||
newRS *extensions.ReplicaSet
|
||||
deployment *extensions.Deployment
|
||||
)
|
||||
|
||||
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newRS == nil {
|
||||
// New RS hasn't been created yet.
|
||||
return false, nil
|
||||
}
|
||||
allRSs = append(oldRSs, newRS)
|
||||
// The old/new ReplicaSets need to contain the pod-template-hash label
|
||||
for i := range allRSs {
|
||||
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
|
||||
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
|
||||
if totalCreated > maxCreated {
|
||||
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
|
||||
}
|
||||
minAvailable := deploymentutil.MinAvailable(deployment)
|
||||
if deployment.Status.AvailableReplicas < minAvailable {
|
||||
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
|
||||
}
|
||||
|
||||
// When the deployment status and its underlying resources reach the desired state, we're done
|
||||
return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
|
||||
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
|
||||
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForDeploymentRollbackCleared waits until the given deployment's rollbackTo field is cleared, i.e. the rollback has either been started or is not needed.
|
||||
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
|
||||
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
|
||||
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Rollback not set or is kicked off
|
||||
if deployment.Spec.RollbackTo == nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time as
|
||||
// old pods.
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
|
||||
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
|
||||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
|
||||
w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
status := d.Status
|
||||
|
||||
condition := func(event watch.Event) (bool, error) {
|
||||
d := event.Object.(*extensions.Deployment)
|
||||
status = d.Status
|
||||
|
||||
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
|
||||
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
|
||||
}
|
||||
|
||||
return *(d.Spec.Replicas) == d.Status.Replicas &&
|
||||
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
|
||||
d.Generation <= d.Status.ObservedGeneration, nil
|
||||
}
|
||||
|
||||
_, err = watch.Until(2*time.Minute, w, condition)
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
|
||||
// Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
|
||||
// may take longer to relabel an RS.
|
||||
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
|
||||
return testutil.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// CheckNewRSAnnotations checks if the new RS's annotations are as expected
|
||||
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
|
||||
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range expectedAnnotations {
|
||||
// Skip checking revision annotations
|
||||
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
|
||||
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
@ -3344,53 +2935,6 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
|
||||
})
|
||||
}
|
||||
|
||||
// Waits for the deployment to clean up old replica sets.
|
||||
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
var oldRSs []*extensions.ReplicaSet
|
||||
var d *extensions.Deployment
|
||||
|
||||
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
d = deployment
|
||||
|
||||
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(oldRSs) == desiredRSNum, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
|
||||
}
|
||||
return pollErr
|
||||
}
|
||||
|
||||
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
|
||||
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
|
||||
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
}, desiredGeneration, Poll, 1*time.Minute)
|
||||
}
|
||||
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
|
||||
var deployment *extensions.Deployment
|
||||
pollErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
|
||||
d, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
deployment = d
|
||||
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
|
||||
return cond != nil && cond.Reason == reason, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
|
||||
}
|
||||
return pollErr
|
||||
}
|
||||
|
||||
// Waits for the number of events on the given object to reach a desired count.
|
||||
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
|
||||
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
|
||||
@ -3425,129 +2969,6 @@ func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Obj
|
||||
})
|
||||
}
|
||||
|
||||
type updateDeploymentFunc func(d *extensions.Deployment)
|
||||
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (*extensions.Deployment, error) {
|
||||
var deployment *extensions.Deployment
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(deployment)
|
||||
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
|
||||
Logf("Updating deployment %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
|
||||
}
|
||||
return deployment, pollErr
|
||||
}
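// Example (illustrative sketch, not part of this change): changing a deployment's image
// through UpdateDeploymentWithRetries so Get/modify/Update conflicts are retried. The
// deployment name and image are placeholders.
func exampleUpdateImage(c clientset.Interface, ns string) error {
	_, err := UpdateDeploymentWithRetries(c, ns, "example-deployment", func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.9"
	})
	return err
}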
|
||||
|
||||
type updateRsFunc func(d *extensions.ReplicaSet)
|
||||
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRsFunc) (*extensions.ReplicaSet, error) {
|
||||
var rs *extensions.ReplicaSet
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(rs)
|
||||
if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil {
|
||||
Logf("Updating replica set %q", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr)
|
||||
}
|
||||
return rs, pollErr
|
||||
}
|
||||
|
||||
type updateRcFunc func(d *v1.ReplicationController)
|
||||
|
||||
func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
|
||||
var rc *v1.ReplicationController
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(rc)
|
||||
if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil {
|
||||
Logf("Updating replication controller %q", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr)
|
||||
}
|
||||
return rc, pollErr
|
||||
}
|
||||
|
||||
type updateStatefulSetFunc func(*apps.StatefulSet)
|
||||
|
||||
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
|
||||
statefulSets := c.Apps().StatefulSets(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(statefulSet)
|
||||
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
|
||||
Logf("Updating stateful set %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr)
|
||||
}
|
||||
return statefulSet, pollErr
|
||||
}
|
||||
|
||||
type updateJobFunc func(*batch.Job)
|
||||
|
||||
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
|
||||
jobs := c.Batch().Jobs(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(job)
|
||||
if job, err = jobs.Update(job); err == nil {
|
||||
Logf("Updating job %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr)
|
||||
}
|
||||
return job, pollErr
|
||||
}
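// Example (illustrative sketch, not part of this change): raising a Job's parallelism
// with UpdateJobWithRetries; conflicts from concurrent writers are retried for up to a
// minute. The job name is a placeholder.
func exampleBumpParallelism(c clientset.Interface, ns string) error {
	parallelism := int32(4)
	_, err := UpdateJobWithRetries(c, ns, "example-job", func(job *batch.Job) {
		job.Spec.Parallelism = &parallelism
	})
	return err
}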
|
||||
|
||||
type updateDSFunc func(*extensions.DaemonSet)
|
||||
|
||||
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
|
||||
@ -4536,46 +3957,6 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
|
||||
return "", fmt.Errorf("Failed to find external address for service %v", name)
|
||||
}
|
||||
|
||||
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
|
||||
// none are running, otherwise it does what a synchronous scale operation would do.
|
||||
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
|
||||
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(rcs.Items) == 0 {
|
||||
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
|
||||
}
|
||||
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
|
||||
for _, labelRC := range rcs.Items {
|
||||
name := labelRC.Name
|
||||
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if replicas == 0 {
|
||||
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ps.Stop()
|
||||
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
|
||||
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
|
||||
}
|
||||
} else {
|
||||
if err := testutil.WaitForPodsWithLabelRunning(
|
||||
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
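// Example (illustrative sketch, not part of this change): scaling every RC that carries
// a label down to zero and letting ScaleRCByLabels wait for the pods to disappear. The
// label set is a placeholder.
func exampleScaleDownByLabel(cs clientset.Interface, internal internalclientset.Interface, ns string) error {
	return ScaleRCByLabels(cs, internal, ns, map[string]string{"app": "example"}, 0)
}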
|
||||
|
||||
// TODO(random-liu): Change this to be a member function of the framework.
|
||||
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
|
||||
return getPodLogsInternal(c, namespace, podName, containerName, false)
|
||||
@ -5231,54 +4612,6 @@ func GetNodeExternalIP(node *v1.Node) string {
|
||||
return host
|
||||
}
|
||||
|
||||
// RcByNamePort returns a ReplicationController with specified name and port
|
||||
func RcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
|
||||
labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
|
||||
|
||||
return RcByNameContainer(name, replicas, image, labels, v1.Container{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
|
||||
}, gracePeriod)
|
||||
}
|
||||
|
||||
// RcByNameContainer returns a ReplicationController with specified name and container
|
||||
func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
|
||||
gracePeriod *int64) *v1.ReplicationController {
|
||||
|
||||
zeroGracePeriod := int64(0)
|
||||
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
labels["name"] = name
|
||||
if gracePeriod == nil {
|
||||
gracePeriod = &zeroGracePeriod
|
||||
}
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{c},
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
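// Example (illustrative sketch, not part of this change): building an RC that exposes a
// TCP port with RcByNamePort and creating it. Name, image and port are placeholders;
// passing nil for gracePeriod defaults it to zero.
func exampleRcByNamePort(c clientset.Interface, ns string) error {
	rc := RcByNamePort("example-server", 2, "gcr.io/google_containers/serve_hostname:v1.4", 9376, v1.ProtocolTCP, map[string]string{}, nil)
	_, err := c.Core().ReplicationControllers(ns).Create(rc)
	return err
}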
|
||||
|
||||
// SimpleGET executes a get on the given url, returns error if non-200 returned.
|
||||
func SimpleGET(c *http.Client, url, host string) (string, error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
@ -5400,3 +4733,14 @@ func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func DumpDebugInfo(c clientset.Interface, ns string) {
|
||||
sl, _ := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
for _, s := range sl.Items {
|
||||
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
|
||||
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
|
||||
|
||||
l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
|
||||
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
|
||||
}
|
||||
}
|
||||
|
@ -53,6 +53,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
@ -98,6 +99,7 @@ const (
|
||||
nginxDeployment1Filename = "nginx-deployment1.yaml"
|
||||
nginxDeployment2Filename = "nginx-deployment2.yaml"
|
||||
nginxDeployment3Filename = "nginx-deployment3.yaml"
|
||||
redisImage = "gcr.io/k8s-testimages/redis:e2e"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -140,6 +142,10 @@ var (
|
||||
// Returning container command exit codes in kubectl run/exec was introduced in #26541 (v1.4)
|
||||
// so we don't expect tests that verifies return code to work on kubectl clients before that.
|
||||
kubectlContainerExitCodeVersion = utilversion.MustParseSemantic("v1.4.0-alpha.3")
|
||||
|
||||
CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
|
||||
|
||||
ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
|
||||
)
|
||||
|
||||
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
|
||||
|
@ -341,7 +341,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
|
||||
headlessService := createServiceSpec(headlessSvcName, "", true, labels)
|
||||
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
|
||||
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
|
||||
framework.ExpectNoError(err)
|
||||
c = f.ClientSet
|
||||
@ -350,7 +350,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
dumpDebugInfo(c, ns)
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all stateful set in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
|
test/e2e/workload/BUILD (new file)
@ -0,0 +1,81 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cronjob.go",
|
||||
"daemon_restart.go",
|
||||
"daemon_set.go",
|
||||
"deployment.go",
|
||||
"job.go",
|
||||
"rc.go",
|
||||
"replica_set.go",
|
||||
"statefulset.go",
|
||||
"types.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/batch:go_default_library",
|
||||
"//pkg/apis/batch/v2alpha1:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/daemon:go_default_library",
|
||||
"//pkg/controller/deployment/util:go_default_library",
|
||||
"//pkg/controller/job:go_default_library",
|
||||
"//pkg/controller/replicaset:go_default_library",
|
||||
"//pkg/controller/replication:go_default_library",
|
||||
"//pkg/kubectl:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/util:go_default_library",
|
||||
"//plugin/pkg/scheduler/schedulercache:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
test/e2e/workload/OWNERS (new executable file)
@ -0,0 +1,7 @@
|
||||
approvers:
|
||||
- janetkuo
|
||||
- nikhiljindal
|
||||
- kargakis
|
||||
- mfojtik
|
||||
reviewers:
|
||||
- sig-apps-reviewers
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
package workload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -28,7 +28,6 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
|
||||
@ -43,11 +42,6 @@ const (
|
||||
cronJobTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
|
||||
ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("CronJob", func() {
|
||||
f := framework.NewDefaultFramework("cronjob")
|
||||
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
package workload
|
||||
|
||||
import (
|
||||
"fmt"
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
package workload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -275,7 +275,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, redisImage)
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
|
||||
@ -330,14 +330,14 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, redisImage)
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
templateGeneration++
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, redisImage, 1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
package workload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -49,11 +49,6 @@ import (
|
||||
const (
|
||||
dRetryPeriod = 2 * time.Second
|
||||
dRetryTimeout = 5 * time.Minute
|
||||
|
||||
// nginxImage defined in kubectl.go
|
||||
nginxImageName = "nginx"
|
||||
redisImage = "gcr.io/k8s-testimages/redis:e2e"
|
||||
redisImageName = "redis"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -256,16 +251,16 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
internalClient := f.InternalClientset
|
||||
|
||||
deploymentName := "test-new-deployment"
|
||||
podLabels := map[string]string{"name": nginxImageName}
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.Extensions().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentStatusValid(c, deploy)
|
||||
@ -286,7 +281,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
deploymentPodLabels := map[string]string{"name": "sample-pod"}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": "sample-pod",
|
||||
"pod": nginxImageName,
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
|
||||
rsName := "test-rolling-update-controller"
|
||||
@ -294,7 +289,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
rsRevision := "3546343826724305832"
|
||||
annotations := make(map[string]string)
|
||||
annotations[deploymentutil.RevisionAnnotation] = rsRevision
|
||||
rs := newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)
|
||||
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
|
||||
@ -306,13 +301,13 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.Extensions().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
@ -339,13 +334,13 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.Extensions().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
@ -354,8 +349,8 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = nginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = nginxImage
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
@ -371,12 +366,12 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": "cleanup-pod",
|
||||
"pod": nginxImageName,
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
rsName := "test-cleanup-controller"
|
||||
replicas := int32(1)
|
||||
revisionHistoryLimit := util.Int32Ptr(0)
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
@ -415,15 +410,15 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
if !ok {
|
||||
framework.Failf("Expect event Object to be a pod")
|
||||
}
|
||||
if pod.Spec.Containers[0].Name != redisImageName {
|
||||
framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod)
|
||||
if pod.Spec.Containers[0].Name != RedisImageName {
|
||||
framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
|
||||
}
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.Extensions().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -442,12 +437,12 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
deploymentPodLabels := map[string]string{"name": podName}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": podName,
|
||||
"pod": nginxImageName,
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
|
||||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage))
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
@ -493,7 +488,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
@ -534,8 +529,8 @@ func testPausedDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
deploymentName := "test-paused-deployment"
|
||||
podLabels := map[string]string{"name": nginxImageName}
|
||||
d := framework.NewDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
d := framework.NewDeployment(deploymentName, 1, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Paused = true
|
||||
tgps := int64(1)
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
|
||||
@ -626,9 +621,9 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
deploymentPodLabels := map[string]string{"name": podName}
|
||||
|
||||
// 1. Create a deployment to create nginx pods.
|
||||
deploymentName, deploymentImageName := "test-rollback-deployment", nginxImageName
|
||||
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := nginxImage
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
@ -649,8 +644,8 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 2. Update the deployment to create redis pods.
|
||||
updatedDeploymentImage := redisImage
|
||||
updatedDeploymentImageName := redisImageName
|
||||
updatedDeploymentImage := RedisImage
|
||||
updatedDeploymentImageName := RedisImageName
|
||||
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
|
||||
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
@ -733,22 +728,22 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
|
||||
deploymentPodLabels := map[string]string{"name": podName}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": podName,
|
||||
"pod": nginxImageName,
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
|
||||
// Create an old RS without revision
|
||||
rsName := "test-rollback-no-revision-controller"
|
||||
rsReplicas := int32(0)
|
||||
rs := newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage)
|
||||
rs := newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = make(map[string]string)
|
||||
rs.Annotations["make"] = "difference"
|
||||
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 1. Create a deployment to create nginx pods, which have different template than the replica set created above.
|
||||
deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", nginxImageName
|
||||
deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", NginxImageName
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := nginxImage
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
@ -785,8 +780,8 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
|
||||
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

// 3. Update the deployment to create redis pods.
updatedDeploymentImage := redisImage
updatedDeploymentImageName := redisImageName
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
@ -868,7 +863,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {

rsName := "test-adopted-controller"
replicas := int32(1)
image := nginxImage
image := NginxImage
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
@ -915,12 +910,12 @@ func testScalePausedDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": nginxImageName}
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(0)

// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
framework.Logf("Creating deployment %q", deploymentName)
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
@ -965,12 +960,12 @@ func testScaledRolloutDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": nginxImageName}
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(10)

// Create a nginx deployment.
deploymentName := "nginx"
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@ -984,7 +979,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {

// Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up")
err = framework.VerifyPodsRunning(f.ClientSet, ns, nginxImageName, false, *(deployment.Spec.Replicas))
err = framework.VerifyPodsRunning(f.ClientSet, ns, NginxImageName, false, *(deployment.Spec.Replicas))
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

framework.Logf("Waiting for deployment %q to complete", deployment.Name)
@ -1030,7 +1025,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
newReplicas := int32(20)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = nautilusImage
update.Spec.Template.Spec.Containers[0].Image = NautilusImage
})
Expect(err).NotTo(HaveOccurred())

@ -1090,7 +1085,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
newReplicas = int32(5)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = kittenImage
update.Spec.Template.Spec.Containers[0].Image = KittenImage
})
Expect(err).NotTo(HaveOccurred())

@ -1120,27 +1115,27 @@ func testOverlappingDeployment(f *framework.Framework) {

// Create first deployment.
deploymentName := "first-deployment"
podLabels := map[string]string{"name": redisImageName}
podLabels := map[string]string{"name": RedisImageName}
replicas := int32(1)
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, podLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred(), "Failed creating the first deployment")

// Wait for it to be updated to revision 1
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploy.Name, "1", redisImage)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploy.Name, "1", RedisImage)
Expect(err).NotTo(HaveOccurred(), "The first deployment failed to update to revision 1")

// Create second deployment with overlapping selector.
deploymentName = "second-deployment"
framework.Logf("Creating deployment %q with overlapping selector", deploymentName)
podLabels["other-label"] = "random-label"
d = framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deployOverlapping, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred(), "Failed creating the second deployment")

// Wait for it to be updated to revision 1
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deployOverlapping.Name, "1", nginxImage)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deployOverlapping.Name, "1", NginxImage)
Expect(err).NotTo(HaveOccurred(), "The second deployment failed to update to revision 1")

// Both deployments should proceed independently.
@ -1155,14 +1150,14 @@ func testFailedDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": nginxImageName}
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)

// Create a nginx deployment.
deploymentName := "progress-check"
nonExistentImage := "nginx:not-there"
ten := int32(10)
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nonExistentImage, extensions.RecreateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, nonExistentImage, extensions.RecreateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &ten

framework.Logf("Creating deployment %q with progressDeadlineSeconds set to %ds and a non-existent image", deploymentName, ten)
@ -1177,7 +1172,7 @@ func testFailedDeployment(f *framework.Framework) {

framework.Logf("Updating deployment %q with a good image", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = nginxImage
update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
Expect(err).NotTo(HaveOccurred())

@ -1208,7 +1203,7 @@ func testIterativeDeployments(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": nginxImageName}
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(6)
zero := int64(0)
two := int32(2)
@ -1216,7 +1211,7 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx"
thirty := int32(30)
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@ -1338,9 +1333,9 @@ func testDeploymentsControllerRef(f *framework.Framework) {

deploymentName := "test-orphan-deployment"
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": nginxImageName}
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentStatusValid(c, deploy)
@ -1360,7 +1355,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {

deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err = c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentStatusValid(c, deploy)
@ -1427,11 +1422,11 @@ func testDeploymentHashCollisionAvoidance(f *framework.Framework) {

deploymentName := "test-hash-collision"
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": nginxImageName}
d := framework.NewDeployment(deploymentName, int32(0), podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType)
podLabels := map[string]string{"name": NginxImageName}
d := framework.NewDeployment(deploymentName, int32(0), podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deployment, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
Expect(err).NotTo(HaveOccurred())

// TODO: Switch this to do a non-cascading deletion of the Deployment, mutate the ReplicaSet
@ -1461,6 +1456,6 @@ func testDeploymentHashCollisionAvoidance(f *framework.Framework) {
}

framework.Logf("Expect a new ReplicaSet to be created")
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", nginxImage)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", NginxImage)
Expect(err).NotTo(HaveOccurred())
}
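The deployment hunks above only swap the file-local image constants (nginxImage, redisImage, nautilusImage, kittenImage) for the exported identifiers defined in the new workload package; the test flow itself is unchanged. A minimal sketch of that pattern, assuming the usual framework plumbing from the surrounding tests and a hypothetical helper name:

// sketchDeploymentImageSwap is illustrative only (not part of this commit): it shows
// how the exported image constants are consumed by the relocated deployment tests.
func sketchDeploymentImageSwap(f *framework.Framework) {
    c := f.ClientSet
    ns := f.Namespace.Name
    podLabels := map[string]string{"name": NginxImageName}
    // Create a deployment using the exported constants.
    d := framework.NewDeployment("nginx-sketch", int32(1), podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
    deploy, err := c.Extensions().Deployments(ns).Create(d)
    Expect(err).NotTo(HaveOccurred())
    // Roll to a different image with the same retry helper used throughout these tests.
    _, err = framework.UpdateDeploymentWithRetries(c, ns, deploy.Name, func(update *extensions.Deployment) {
        update.Spec.Template.Spec.Containers[0].Image = RedisImage
    })
    Expect(err).NotTo(HaveOccurred())
    err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploy.Name, "2", RedisImage)
    Expect(err).NotTo(HaveOccurred())
}

The hunks that follow apply the same package move and constant renames to the job, replication controller, replica set, and stateful set tests.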
@ -14,23 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e
package workload

import (
"fmt"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"

"fmt"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/controller"
)

var _ = framework.KubeDescribe("Job", func() {
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e
package workload

import (
"fmt"
@ -38,14 +38,14 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
f := framework.NewDefaultFramework("replication-controller")

It("should serve a basic image on each replica with a public image [Conformance]", func() {
testReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})

It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")

testReplicationControllerServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
TestReplicationControllerServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
})

It("should surface a failure condition on a common issue like exceeded quota", func() {
@ -90,7 +90,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func testReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
replicas := int32(1)

@ -176,7 +176,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, nginxImageName, nginxImage)
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rc, err = c.Core().ReplicationControllers(namespace).Create(rc)
Expect(err).NotTo(HaveOccurred())

@ -246,7 +246,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
Containers: []v1.Container{
{
Name: name,
Image: nginxImageName,
Image: NginxImageName,
},
},
},
@ -254,7 +254,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {

By("When a replication controller with a matching selector is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, nginxImageName)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt)
Expect(err).NotTo(HaveOccurred())
@ -283,7 +283,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
name := "pod-release"
By("Given a ReplicationController is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, nginxImageName)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt)
Expect(err).NotTo(HaveOccurred())
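Note the rename from testReplicationControllerServeImageOrFail to the exported TestReplicationControllerServeImageOrFail: once the tests live in their own package, the conformance helper has to be reachable from outside the file. A hedged sketch of a hypothetical caller in another e2e package, assuming the import path simply follows the new directory layout (k8s.io/kubernetes/test/e2e/workload):

// Hypothetical consumer, not part of this commit; shown only to motivate the export.
package e2esketch

import (
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/workload"
)

func exerciseRCServeImage(f *framework.Framework) {
    // Same call the [Conformance] spec makes above, now reachable across packages.
    workload.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}

The ReplicaSet hunks below make the analogous package move and constant swaps.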
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e
package workload

import (
"fmt"
@ -188,7 +188,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, nginxImageName, nginxImage)
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rs, err = c.Extensions().ReplicaSets(namespace).Create(rs)
Expect(err).NotTo(HaveOccurred())

@ -259,7 +259,7 @@ func testRSAdoptMatchingOrphans(f *framework.Framework) {
Containers: []v1.Container{
{
Name: name,
Image: nginxImageName,
Image: NginxImageName,
},
},
},
@ -267,7 +267,7 @@ func testRSAdoptMatchingOrphans(f *framework.Framework) {

By("When a replicaset with a matching selector is created")
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, nginxImageName)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(rsSt)
Expect(err).NotTo(HaveOccurred())
@ -296,7 +296,7 @@ func testRSReleaseControlledNotMatching(f *framework.Framework) {
name := "pod-release"
By("Given a ReplicaSet is created")
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, nginxImageName)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(rsSt)
Expect(err).NotTo(HaveOccurred())
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e
package workload

import (
"fmt"
@ -26,7 +26,6 @@ import (
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
@ -77,14 +76,14 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)

By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := createServiceSpec(headlessSvcName, "", true, labels)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.Core().Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
})

AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
dumpDebugInfo(c, ns)
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
framework.DeleteAllStatefulSets(c, ns)
@ -276,7 +275,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
sst.BreakPodProbe(ss, &pods.Items[1], testProbe)
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := newNginxImage
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
@ -400,7 +399,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
newImage := newNginxImage
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
@ -621,7 +620,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
newImage := newNginxImage
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
@ -876,7 +875,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {

AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
dumpDebugInfo(c, ns)
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
framework.DeleteAllStatefulSets(c, ns)
@ -904,17 +903,6 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
})
})

func dumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := framework.RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

l, _ := framework.RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
framework.Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}

func kubectlExecWithRetries(args ...string) (out string) {
var err error
for i := 0; i < 3; i++ {
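In the StatefulSet file the file-local debug helper is deleted outright; its job (kubectl describe plus the last 100 log lines for every pod in the namespace) now falls to framework.DumpDebugInfo, and the service-spec constructor moves to framework.CreateServiceSpec. A sketch of the setup/teardown pair after the move, assuming the framework versions keep the same behavior as the removed code:

// Sketch only: c, ns, ss, labels and the mount slices come from the enclosing Describe block.
BeforeEach(func() {
    ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
    headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
    _, err := c.Core().Services(ns).Create(headlessService)
    Expect(err).NotTo(HaveOccurred())
})

AfterEach(func() {
    if CurrentGinkgoTestDescription().Failed {
        framework.DumpDebugInfo(c, ns) // describe + tail logs, as the deleted local helper did
    }
    framework.DeleteAllStatefulSets(c, ns)
})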
test/e2e/workload/types.go (new file, 37 lines)
@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workload

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

const (
    NautilusImage  = "gcr.io/google_containers/update-demo:nautilus"
    KittenImage    = "gcr.io/google_containers/update-demo:kitten"
    NginxImage     = "gcr.io/google_containers/nginx-slim:0.7"
    NginxImageName = "nginx"
    RedisImage     = "gcr.io/k8s-testimages/redis:e2e"
    RedisImageName = "redis"
    NewNginxImage  = "gcr.io/google_containers/nginx-slim:0.8"
)

var (
    CronJobGroupVersionResource      = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
    ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
)
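The new types.go hoists the image constants and the batch/v2alpha1 group-version-resources to exported package-level identifiers so that every test file in the workload package can share them. A small sketch of how they are referenced, assuming the usual core/v1 and framework imports already present in the relocated tests:

// Illustrative snippet only; it lives conceptually inside package workload.
container := v1.Container{
    Name:  NginxImageName, // "nginx"
    Image: NginxImage,     // "gcr.io/google_containers/nginx-slim:0.7"
}
framework.Logf("sketch container %q uses image %q; CronJob GVR is %v",
    container.Name, container.Image, CronJobGroupVersionResource)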