Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 05:27:21 +00:00)

Merge pull request #19840 from madhusudancs/replicaset-deployment

Auto commit by PR queue bot

This commit is contained in commit 41a98b43e4.
@@ -7,7 +7,8 @@ metadata:
spec:
replicas: 3
selector:
name: nginx
matchLabels:
name: nginx
template:
metadata:
labels:
@@ -943,6 +943,7 @@ __EOF__
kubectl scale --replicas=2 -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
kubectl delete rc frontend "${kube_flags[@]}"

### Scale multiple replication controllers
kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}"
@@ -963,16 +964,17 @@ __EOF__
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
# TODO: Remove once deployment reaping is implemented
kubectl delete rc --all "${kube_flags[@]}"
# TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
# ### Scale a deployment
# kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
# # Command
# kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# # Post-condition: 1 replica for nginx-deployment
# kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# # Clean-up
# kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
# # TODO: Remove once deployment reaping is implemented
# kubectl delete rs --all "${kube_flags[@]}"

### Expose a deployment as a service
kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
@@ -985,7 +987,7 @@ __EOF__
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
# TODO: Remove once deployment reaping is implemented
kubectl delete rc --all "${kube_flags[@]}"
kubectl delete rs --all "${kube_flags[@]}"

### Expose replication controller as service
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
@@ -1102,7 +1104,7 @@ __EOF__
# Clean up
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rc -l pod-template-hash "${kube_flags[@]}"
kubectl delete rs -l pod-template-hash "${kube_flags[@]}"

### Rollback a deployment
# Pre-condition: no deployment exists
@@ -1131,7 +1133,7 @@ __EOF__
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" 'nginx:latest:'
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rc -l pod-template-hash "${kube_flags[@]}"
kubectl delete rs -l pod-template-hash "${kube_flags[@]}"

######################
# ConfigMap #
hack/testdata/deployment-revision2.yaml (vendored, 3 lines changed)
@@ -7,7 +7,8 @@ metadata:
spec:
replicas: 3
selector:
name: nginx
matchLabels:
name: nginx
template:
metadata:
labels:
@@ -16,24 +16,32 @@ limitations under the License.

package extensions

import (
"k8s.io/kubernetes/pkg/api"
)
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
// import (
// "fmt"

// ScaleFromDeployment returns a scale subresource for a deployment.
func ScaleFromDeployment(deployment *Deployment) *Scale {
return &Scale{
ObjectMeta: api.ObjectMeta{
Name: deployment.Name,
Namespace: deployment.Namespace,
CreationTimestamp: deployment.CreationTimestamp,
},
Spec: ScaleSpec{
Replicas: deployment.Spec.Replicas,
},
Status: ScaleStatus{
Replicas: deployment.Status.Replicas,
Selector: deployment.Spec.Selector,
},
}
}
// "k8s.io/kubernetes/pkg/api"
// "k8s.io/kubernetes/pkg/api/unversioned"
// )

// // ScaleFromDeployment returns a scale subresource for a deployment.
// func ScaleFromDeployment(deployment *Deployment) (*Scale, error) {
// selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
// if err != nil {
// return nil, fmt.Errorf("failed to convert label selector to selector: %v", err)
// }
// return &Scale{
// ObjectMeta: api.ObjectMeta{
// Name: deployment.Name,
// Namespace: deployment.Namespace,
// CreationTimestamp: deployment.CreationTimestamp,
// },
// Spec: ScaleSpec{
// Replicas: deployment.Spec.Replicas,
// },
// Status: ScaleStatus{
// Replicas: deployment.Status.Replicas,
// Selector: selector.String(),
// },
// }, nil
// }
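As a side note on the commented-out variant above: it hinges on first converting the new set-based selector into a labels.Selector. A minimal, hedged sketch of that conversion step, written against the pkg/api/unversioned helper the diff itself uses (the helper name and package clause below are illustrative, not part of the commit):

    package example // illustrative only; the real code lives in pkg/apis/extensions

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api/unversioned"
        "k8s.io/kubernetes/pkg/apis/extensions"
    )

    // selectorStringForDeployment renders a Deployment's set-based selector as a
    // string. LabelSelectorAsSelector can fail on malformed match expressions,
    // which is why the commented-out ScaleFromDeployment grew an error return.
    func selectorStringForDeployment(d *extensions.Deployment) (string, error) {
        selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
        if err != nil {
            return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
        }
        return selector.String(), nil
    }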
@@ -5430,7 +5430,7 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) {
_, _, _ = yysep453, yyq453, yy2arr453
const yyr453 bool = false
yyq453[0] = x.Replicas != 0
yyq453[1] = len(x.Selector) != 0
yyq453[1] = x.Selector != nil
yyq453[3] = true
yyq453[4] = x.MinReadySeconds != 0
yyq453[5] = x.RevisionHistoryLimit != nil
@@ -5483,8 +5483,9 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) {
yym458 := z.EncBinary()
_ = yym458
if false {
} else if z.HasExtensions() && z.EncExt(x.Selector) {
} else {
z.F.EncMapStringStringV(x.Selector, false, e)
z.EncFallback(x.Selector)
}
}
} else {
@@ -5501,8 +5502,9 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) {
yym459 := z.EncBinary()
_ = yym459
if false {
} else if z.HasExtensions() && z.EncExt(x.Selector) {
} else {
z.F.EncMapStringStringV(x.Selector, false, e)
z.EncFallback(x.Selector)
}
}
}
@@ -5712,14 +5714,19 @@ func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
}
case "selector":
if r.TryDecodeAsNil() {
x.Selector = nil
if x.Selector != nil {
x.Selector = nil
}
} else {
yyv482 := &x.Selector
if x.Selector == nil {
x.Selector = new(pkg1_unversioned.LabelSelector)
}
yym483 := z.DecBinary()
_ = yym483
if false {
} else if z.HasExtensions() && z.DecExt(x.Selector) {
} else {
z.F.DecMapStringStringX(yyv482, false, d)
z.DecFallback(x.Selector, false)
}
}
case "template":
@@ -5817,14 +5824,19 @@ func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Selector = nil
if x.Selector != nil {
x.Selector = nil
}
} else {
yyv493 := &x.Selector
if x.Selector == nil {
x.Selector = new(pkg1_unversioned.LabelSelector)
}
yym494 := z.DecBinary()
_ = yym494
if false {
} else if z.HasExtensions() && z.DecExt(x.Selector) {
} else {
z.F.DecMapStringStringX(yyv493, false, d)
z.DecFallback(x.Selector, false)
}
}
yyj491++
@@ -233,9 +233,9 @@ type DeploymentSpec struct {
// zero and not specified. Defaults to 1.
Replicas int `json:"replicas,omitempty"`

// Label selector for pods. Existing ReplicationControllers whose pods are
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
Selector map[string]string `json:"selector,omitempty"`
Selector *unversioned.LabelSelector `json:"selector,omitempty"`

// Template describes the pods that will be created.
Template api.PodTemplateSpec `json:"template"`
@@ -248,7 +248,7 @@ type DeploymentSpec struct {
// Defaults to 0 (pod will be considered available as soon as it is ready)
MinReadySeconds int `json:"minReadySeconds,omitempty"`

// The number of old ReplicationControllers to retain to allow rollback.
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
RevisionHistoryLimit *int `json:"revisionHistoryLimit,omitempty"`
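To make the new field shape concrete, here is a hedged sketch (not part of the diff; names are illustrative) of building an internal DeploymentSpec with the pointer-to-LabelSelector field that replaces the old map[string]string selector:

    package example // illustrative only

    import (
        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/unversioned"
        "k8s.io/kubernetes/pkg/apis/extensions"
    )

    // newNginxSpec mirrors the YAML change at the top of this commit: the selector
    // moves from a flat label map to matchLabels inside a *unversioned.LabelSelector.
    func newNginxSpec() extensions.DeploymentSpec {
        podLabels := map[string]string{"name": "nginx"}
        return extensions.DeploymentSpec{
            Replicas: 3,
            Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
            Template: api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{Labels: podLabels},
            },
        }
    }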
@@ -104,9 +104,9 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.
out.Replicas = new(int32)
*out.Replicas = int32(in.Replicas)
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
out.Selector = new(LabelSelector)
if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
return err
}
} else {
out.Selector = nil
@@ -139,10 +139,11 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS
if in.Replicas != nil {
out.Replicas = int(*in.Replicas)
}

if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
out.Selector = new(unversioned.LabelSelector)
if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
return err
}
} else {
out.Selector = nil
@@ -2855,10 +2855,11 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi
if err := s.Convert(&in.Replicas, &out.Replicas, 0); err != nil {
return err
}
// unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
out.Selector = new(LabelSelector)
if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
return err
}
} else {
out.Selector = nil
@@ -4170,10 +4171,11 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *Deploym
defaulting.(func(*DeploymentSpec))(in)
}
// in.Replicas has no peer in out
// unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
out.Selector = new(unversioned.LabelSelector)
if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
return err
}
} else {
out.Selector = nil
@@ -1185,9 +1185,9 @@ func deepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *
out.Replicas = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
out.Selector = new(LabelSelector)
if err := deepCopy_v1beta1_LabelSelector(*in.Selector, out.Selector, c); err != nil {
return err
}
} else {
out.Selector = nil
@@ -67,8 +67,8 @@ func addDefaultingFuncs(scheme *runtime.Scheme) {
labels := obj.Spec.Template.Labels

if labels != nil {
if len(obj.Spec.Selector) == 0 {
obj.Spec.Selector = labels
if obj.Spec.Selector == nil {
obj.Spec.Selector = &LabelSelector{MatchLabels: labels}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
File diff suppressed because it is too large
@@ -199,7 +199,7 @@ type ThirdPartyResourceData struct {
Data []byte `json:"data,omitempty"`
}

// Deployment enables declarative updates for Pods and ReplicationControllers.
// Deployment enables declarative updates for Pods and ReplicaSets.
type Deployment struct {
unversioned.TypeMeta `json:",inline"`
// Standard object metadata.
@@ -218,9 +218,9 @@ type DeploymentSpec struct {
// zero and not specified. Defaults to 1.
Replicas *int32 `json:"replicas,omitempty"`

// Label selector for pods. Existing ReplicationControllers whose pods are
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
Selector map[string]string `json:"selector,omitempty"`
Selector *LabelSelector `json:"selector,omitempty"`

// Template describes the pods that will be created.
Template v1.PodTemplateSpec `json:"template"`
@@ -233,7 +233,7 @@ type DeploymentSpec struct {
// Defaults to 0 (pod will be considered available as soon as it is ready)
MinReadySeconds int32 `json:"minReadySeconds,omitempty"`

// The number of old ReplicationControllers to retain to allow rollback.
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
@@ -147,7 +147,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
}

var map_Deployment = map[string]string{
"": "Deployment enables declarative updates for Pods and ReplicationControllers.",
"": "Deployment enables declarative updates for Pods and ReplicaSets.",
"metadata": "Standard object metadata.",
"spec": "Specification of the desired behavior of the Deployment.",
"status": "Most recently observed status of the Deployment.",
@@ -181,11 +181,11 @@ func (DeploymentRollback) SwaggerDoc() map[string]string {
var map_DeploymentSpec = map[string]string{
"": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
"replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
"selector": "Label selector for pods. Existing ReplicationControllers whose pods are selected by this will be the ones affected by this deployment.",
"selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
"template": "Template describes the pods that will be created.",
"strategy": "The deployment strategy to use to replace existing pods with new ones.",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"revisionHistoryLimit": "The number of old ReplicationControllers to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
"revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
"paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.",
"rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
}
@@ -346,9 +346,24 @@ func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path)
// Validates given deployment spec.
func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpecForRC(&spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...)

if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment."))
}
}

selector, err := unversioned.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "failed to convert LabelSelector to Selector."))
} else {
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
}

allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.RevisionHistoryLimit != nil {
@@ -952,8 +952,10 @@ func validDeployment() *extensions.Deployment {
Namespace: api.NamespaceDefault,
},
Spec: extensions.DeploymentSpec{
Selector: map[string]string{
"name": "abc",
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": "abc",
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
@@ -1000,8 +1002,10 @@ func TestValidateDeployment(t *testing.T) {
}
// selector should match the labels in pod template.
invalidSelectorDeployment := validDeployment()
invalidSelectorDeployment.Spec.Selector = map[string]string{
"name": "def",
invalidSelectorDeployment.Spec.Selector = &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": "def",
},
}
errorCases["`selector` does not match template `labels`"] = invalidSelectorDeployment
pkg/client/cache/listers.go (vendored, 42 lines changed)
@@ -236,33 +236,34 @@ func (s *StoreToDeploymentLister) List() (deployments []extensions.Deployment, e
return deployments, nil
}

// GetDeploymentsForRC returns a list of deployments managing a replication controller. Returns an error only if no matching deployments are found.
func (s *StoreToDeploymentLister) GetDeploymentsForRC(rc *api.ReplicationController) (deployments []extensions.Deployment, err error) {
var selector labels.Selector
// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found.
func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []extensions.Deployment, err error) {
var d extensions.Deployment

if len(rc.Labels) == 0 {
err = fmt.Errorf("no deployments found for replication controller %v because it has no labels", rc.Name)
if len(rs.Labels) == 0 {
err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
return
}

// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
for _, m := range s.Store.List() {
d = *m.(*extensions.Deployment)
if d.Namespace != rc.Namespace {
if d.Namespace != rs.Namespace {
continue
}
labelSet := labels.Set(d.Spec.Selector)
selector = labels.Set(d.Spec.Selector).AsSelector()

selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to convert LabelSelector to Selector: %v", err)
}
// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(rc.Labels)) {
if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
continue
}
deployments = append(deployments, d)
}
if len(deployments) == 0 {
err = fmt.Errorf("could not find deployments set for replication controller %s in namespace %s with labels: %v", rc.Name, rc.Namespace, rc.Labels)
err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
}
return
}
@@ -290,6 +291,27 @@ func (s *StoreToReplicaSetLister) List() (rss []extensions.ReplicaSet, err error
return rss, nil
}

type storeReplicaSetsNamespacer struct {
store Store
namespace string
}

func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (rss []extensions.ReplicaSet, err error) {
for _, c := range s.store.List() {
rs := *(c.(*extensions.ReplicaSet))
if s.namespace == api.NamespaceAll || s.namespace == rs.Namespace {
if selector.Matches(labels.Set(rs.Labels)) {
rss = append(rss, rs)
}
}
}
return
}

func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer {
return storeReplicaSetsNamespacer{s.Store, namespace}
}

// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found.
func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []extensions.ReplicaSet, err error) {
var selector labels.Selector
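A hedged usage sketch for the reworked lister (the function and variable names below are illustrative, not taken from the commit): a caller holding a StoreToDeploymentLister can now resolve the deployments that manage a given ReplicaSet directly.

    package example // illustrative only

    import (
        "k8s.io/kubernetes/pkg/apis/extensions"
        "k8s.io/kubernetes/pkg/client/cache"
    )

    // deploymentsFor returns the deployments whose selector matches the ReplicaSet's
    // labels; the lister reports an error when the ReplicaSet has no labels or when
    // no deployment matches, so an error is treated here as "none found".
    func deploymentsFor(lister *cache.StoreToDeploymentLister, rs *extensions.ReplicaSet) []extensions.Deployment {
        deployments, err := lister.GetDeploymentsForReplicaSet(rs)
        if err != nil {
            return nil
        }
        return deployments
    }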
@@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
@@ -477,3 +478,16 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool {
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

// ReplicaSetsByCreationTimestamp sorts a list of ReplicationSets by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet

func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
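The new sort type above plugs into the standard library sorter; a brief hedged sketch of its intended use (assuming it sits in the same package as the declaration above, with "sort" and the extensions package already imported there):

    // Sketch: order replica sets oldest-first, with names breaking creation-time
    // ties, before deciding which ones to clean up or scale down.
    func sortOldestFirst(rsList []*extensions.ReplicaSet) {
        sort.Sort(ReplicaSetsByCreationTimestamp(rsList))
    }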
File diff suppressed because it is too large
@@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
)

func TestDeploymentController_reconcileNewRC(t *testing.T) {
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxSurge intstr.IntOrString
@@ -87,16 +87,16 @@ func TestDeploymentController_reconcileNewRC(t *testing.T) {

for i, test := range tests {
t.Logf("executing scenario %d", i)
newRc := rc("foo-v2", test.newReplicas, nil)
oldRc := rc("foo-v2", test.oldReplicas, nil)
allRcs := []*api.ReplicationController{newRc, oldRc}
newRS := rs("foo-v2", test.newReplicas, nil)
oldRS := rs("foo-v2", test.oldReplicas, nil)
allRSs := []*exp.ReplicaSet{newRS, oldRS}
deployment := deployment("foo", test.deploymentReplicas, test.maxSurge, intstr.FromInt(0))
fake := fake.Clientset{}
controller := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileNewRC(allRcs, newRc, deployment)
scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -115,21 +115,21 @@ func TestDeploymentController_reconcileNewRC(t *testing.T) {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(testclient.UpdateAction).GetObject().(*api.ReplicationController)
updated := fake.Actions()[0].(testclient.UpdateAction).GetObject().(*exp.ReplicaSet)
if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}

func TestDeploymentController_reconcileOldRCs(t *testing.T) {
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
oldReplicas int
newReplicas int
readyPodsFromOldRC int
readyPodsFromNewRC int
readyPodsFromOldRS int
readyPodsFromNewRS int
scaleExpected bool
expectedOldReplicas int
}{
@@ -138,8 +138,8 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
maxUnavailable: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 10,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
{
@@ -147,38 +147,38 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 10,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect unhealthy replicas from old rcs been cleaned up
{ // expect unhealthy replicas from old replica sets been cleaned up
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 8,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect 1 unhealthy replica from old rcs been cleaned up, and 1 ready pod been scaled down
{ // expect 1 unhealthy replica from old replica sets been cleaned up, and 1 ready pod been scaled down
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 9,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 9,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // the unavailable pods from the newRC would not make us scale down old RCs in a further step
{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRC: 8,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
}
@@ -187,10 +187,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {

newSelector := map[string]string{"foo": "new"}
oldSelector := map[string]string{"foo": "old"}
newRc := rc("foo-new", test.newReplicas, newSelector)
oldRc := rc("foo-old", test.oldReplicas, oldSelector)
oldRCs := []*api.ReplicationController{oldRc}
allRCs := []*api.ReplicationController{oldRc, newRc}
newRS := rs("foo-new", test.newReplicas, newSelector)
oldRS := rs("foo-old", test.oldReplicas, oldSelector)
oldRSs := []*exp.ReplicaSet{oldRS}
allRSs := []*exp.ReplicaSet{oldRS, newRS}

deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
fakeClientset := fake.Clientset{}
@@ -198,10 +198,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
switch action.(type) {
case core.ListAction:
podList := &api.PodList{}
for podIndex := 0; podIndex < test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-oldReadyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-oldReadyPod-%d", oldRS.Name, podIndex),
Labels: oldSelector,
},
Status: api.PodStatus{
@@ -214,10 +214,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRS.Name, podIndex),
Labels: oldSelector,
},
Status: api.PodStatus{
@@ -230,10 +230,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.readyPodsFromNewRC; podIndex++ {
for podIndex := 0; podIndex < test.readyPodsFromNewRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-newReadyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-newReadyPod-%d", oldRS.Name, podIndex),
Labels: newSelector,
},
Status: api.PodStatus{
@@ -246,10 +246,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-newUnhealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-newUnhealthyPod-%d", oldRS.Name, podIndex),
Labels: newSelector,
},
Status: api.PodStatus{
@@ -271,7 +271,7 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
eventRecorder: &record.FakeRecorder{},
}

scaled, err := controller.reconcileOldRCs(allRCs, oldRCs, newRc, deployment, false)
scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -327,8 +327,8 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {

for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRc := rc("foo-v2", test.oldReplicas, nil)
oldRCs := []*api.ReplicationController{oldRc}
oldRS := rs("foo-v2", test.oldReplicas, nil)
oldRSs := []*exp.ReplicaSet{oldRS}
deployment := deployment("foo", 10, intstr.FromInt(2), intstr.FromInt(2))
fakeClientset := fake.Clientset{}
fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -338,7 +338,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
for podIndex := 0; podIndex < test.readyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-readyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-readyPod-%d", oldRS.Name, podIndex),
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -353,7 +353,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
for podIndex := 0; podIndex < test.unHealthyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRS.Name, podIndex),
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -374,7 +374,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRCs, deployment, test.maxCleanupCount)
cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, test.maxCleanupCount)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -386,7 +386,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
}
}

func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
@@ -428,9 +428,9 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {

for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRc := rc("foo-v2", test.oldReplicas, nil)
allRcs := []*api.ReplicationController{oldRc}
oldRcs := []*api.ReplicationController{oldRc}
oldRS := rs("foo-v2", test.oldReplicas, nil)
allRSs := []*exp.ReplicaSet{oldRS}
oldRSs := []*exp.ReplicaSet{oldRS}
deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
fakeClientset := fake.Clientset{}
fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -440,7 +440,8 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
for podIndex := 0; podIndex < test.readyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-pod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-pod-%d", oldRS.Name, podIndex),
Labels: map[string]string{"foo": "bar"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -460,7 +461,7 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.scaleDownOldRCsForRollingUpdate(allRcs, oldRcs, deployment)
scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -492,42 +493,42 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*api.ReplicationController)
updated := updateAction.GetObject().(*exp.ReplicaSet)
if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}

func TestDeploymentController_cleanupOldRCs(t *testing.T) {
func TestDeploymentController_cleanupOldReplicaSets(t *testing.T) {
selector := map[string]string{"foo": "bar"}

tests := []struct {
oldRCs []*api.ReplicationController
oldRSs []*exp.ReplicaSet
revisionHistoryLimit int
expectedDeletions int
}{
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 0, selector),
rc("foo-2", 0, selector),
rc("foo-3", 0, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 0, selector),
rs("foo-2", 0, selector),
rs("foo-3", 0, selector),
},
revisionHistoryLimit: 1,
expectedDeletions: 2,
},
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 0, selector),
rc("foo-2", 0, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 0, selector),
rs("foo-2", 0, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 2,
},
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 1, selector),
rc("foo-2", 1, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 1, selector),
rs("foo-2", 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
@@ -539,14 +540,14 @@ func TestDeploymentController_cleanupOldRCs(t *testing.T) {
controller := NewDeploymentController(fake, controller.NoResyncPeriodFunc)

controller.eventRecorder = &record.FakeRecorder{}
controller.rcStoreSynced = alwaysReady
controller.rsStoreSynced = alwaysReady
controller.podStoreSynced = alwaysReady
for _, rc := range test.oldRCs {
controller.rcStore.Add(rc)
for _, rs := range test.oldRSs {
controller.rsStore.Add(rs)
}

d := newDeployment(1, &tests[i].revisionHistoryLimit)
controller.cleanupOldRcs(test.oldRCs, *d)
controller.cleanupOldReplicaSets(test.oldRSs, *d)

gotDeletions := 0
for _, action := range fake.Actions() {
@@ -555,20 +556,20 @@ func TestDeploymentController_cleanupOldRCs(t *testing.T) {
}
}
if gotDeletions != test.expectedDeletions {
t.Errorf("expect %v old rcs been deleted, but got %v", test.expectedDeletions, gotDeletions)
t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
continue
}
}
}

func rc(name string, replicas int, selector map[string]string) *api.ReplicationController {
return &api.ReplicationController{
func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
return &exp.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: api.ReplicationControllerSpec{
Spec: exp.ReplicaSetSpec{
Replicas: replicas,
Selector: selector,
Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: &api.PodTemplateSpec{},
},
}
@@ -609,7 +610,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
RollingUpdate: &exp.RollingUpdateDeployment{},
},
Replicas: replicas,
Selector: map[string]string{"foo": "bar"},
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{
@@ -640,13 +641,13 @@ func getKey(d *exp.Deployment, t *testing.T) string {
}
}

func newReplicationController(d *exp.Deployment, name string, replicas int) *api.ReplicationController {
return &api.ReplicationController{
func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet {
return &exp.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
},
Spec: api.ReplicationControllerSpec{
Spec: exp.ReplicaSetSpec{
Replicas: replicas,
Template: &d.Spec.Template,
},
@@ -664,7 +665,7 @@ type fixture struct {
client *fake.Clientset
// Objects to put in the store.
dStore []*exp.Deployment
rcStore []*api.ReplicationController
rsStore []*exp.ReplicaSet
podStore []*api.Pod

// Actions expected to happen on the client. Objects from here are also
@@ -678,14 +679,14 @@ func (f *fixture) expectUpdateDeploymentAction(d *exp.Deployment) {
f.objects.Items = append(f.objects.Items, d)
}

func (f *fixture) expectCreateRCAction(rc *api.ReplicationController) {
f.actions = append(f.actions, core.NewCreateAction("replicationcontrollers", rc.Namespace, rc))
f.objects.Items = append(f.objects.Items, rc)
func (f *fixture) expectCreateRSAction(rs *exp.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction("replicasets", rs.Namespace, rs))
f.objects.Items = append(f.objects.Items, rs)
}

func (f *fixture) expectUpdateRCAction(rc *api.ReplicationController) {
f.actions = append(f.actions, core.NewUpdateAction("replicationcontrollers", rc.Namespace, rc))
f.objects.Items = append(f.objects.Items, rc)
func (f *fixture) expectUpdateRSAction(rs *exp.ReplicaSet) {
f.actions = append(f.actions, core.NewUpdateAction("replicasets", rs.Namespace, rs))
f.objects.Items = append(f.objects.Items, rs)
}

func (f *fixture) expectListPodAction(namespace string, opt api.ListOptions) {
@@ -703,13 +704,13 @@ func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects)
c := NewDeploymentController(f.client, controller.NoResyncPeriodFunc)
c.eventRecorder = &record.FakeRecorder{}
c.rcStoreSynced = alwaysReady
c.rsStoreSynced = alwaysReady
c.podStoreSynced = alwaysReady
for _, d := range f.dStore {
c.dStore.Store.Add(d)
}
for _, rc := range f.rcStore {
c.rcStore.Store.Add(rc)
for _, rs := range f.rsStore {
c.rsStore.Store.Add(rs)
}
for _, pod := range f.podStore {
c.podStore.Store.Add(pod)
@@ -739,22 +740,22 @@ func (f *fixture) run(deploymentName string) {
}
}

func TestSyncDeploymentCreatesRC(t *testing.T) {
func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
f := newFixture(t)

d := newDeployment(1, nil)
f.dStore = append(f.dStore, d)

// expect that one rc with zero replicas is created
// expect that one ReplicaSet with zero replicas is created
// then is updated to 1 replica
rc := newReplicationController(d, "deploymentrc-4186632231", 0)
updatedRC := newReplicationController(d, "deploymentrc-4186632231", 1)
rs := newReplicaSet(d, "deploymentrs-4186632231", 0)
updatedRS := newReplicaSet(d, "deploymentrs-4186632231", 1)
opt := newListOptions()

f.expectCreateRCAction(rc)
f.expectCreateRSAction(rs)
f.expectUpdateDeploymentAction(d)
f.expectUpdateRCAction(updatedRC)
f.expectListPodAction(rc.Namespace, opt)
f.expectUpdateRSAction(updatedRS)
f.expectListPodAction(rs.Namespace, opt)
f.expectUpdateDeploymentAction(d)

f.run(getKey(d, t))
@@ -125,6 +125,7 @@ __custom_func() {
* persistentvolumeclaims (aka 'pvc')
* quota
* resourcequotas (aka 'quota')
* replicasets (aka 'rs')
* replicationcontrollers (aka 'rc')
* secrets
* serviceaccounts
@@ -290,6 +290,7 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) {
}
rf := cmdutil.NewFactory(nil)
f.PodSelectorForObject = rf.PodSelectorForObject
f.MapBasedSelectorForObject = rf.MapBasedSelectorForObject
f.PortsForObject = rf.PortsForObject
f.LabelsForObject = rf.LabelsForObject
f.CanBeExposed = rf.CanBeExposed
@@ -149,9 +149,9 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str
// For objects that need a pod selector, derive it from the exposed object in case a user
// didn't explicitly specify one via --selector
if s, found := params["selector"]; found && kubectl.IsZero(s) {
s, err := f.PodSelectorForObject(inputObject)
s, err := f.MapBasedSelectorForObject(inputObject)
if err != nil {
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find selectors via --selector flag or introspection: %s", err))
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err))
}
params["selector"] = s
}
@@ -94,6 +94,10 @@ type Factory struct {
Rollbacker func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error)
// PodSelectorForObject returns the pod selector associated with the provided object
PodSelectorForObject func(object runtime.Object) (string, error)
// MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a
// new set-based selector is provided, an error is returned if the selector cannot be converted to a
// map-based selector
MapBasedSelectorForObject func(object runtime.Object) (string, error)
// PortsForObject returns the ports associated with the provided object
PortsForObject func(object runtime.Object) ([]string, error)
// LabelsForObject returns the labels associated with the provided object
@@ -257,7 +261,41 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
}
return kubectl.MakeLabels(t.Spec.Selector), nil
case *extensions.Deployment:
selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
if err != nil {
return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
}
return selector.String(), nil
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
return "", err
}
return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
}
},
MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
switch t := object.(type) {
case *api.ReplicationController:
return kubectl.MakeLabels(t.Spec.Selector), nil
case *api.Pod:
if len(t.Labels) == 0 {
return "", fmt.Errorf("the pod has no labels and cannot be exposed")
}
return kubectl.MakeLabels(t.Labels), nil
case *api.Service:
if t.Spec.Selector == nil {
return "", fmt.Errorf("the service has no pod selector set")
}
return kubectl.MakeLabels(t.Spec.Selector), nil
case *extensions.Deployment:
// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
// operator, DoubleEquals operator and In operator with only one element in the set.
if len(t.Spec.Selector.MatchExpressions) > 0 {
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
}
return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
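For intuition, the MapBasedSelectorForObject path above only succeeds when the deployment's set-based selector can be flattened back into plain key=value pairs; match expressions have no map form. A hedged, stand-alone sketch of that flattening (the helper name is illustrative, not from the commit):

    package example // illustrative only

    import (
        "fmt"
        "strings"

        "k8s.io/kubernetes/pkg/api/unversioned"
    )

    // mapBasedSelector flattens MatchLabels into "k1=v1,k2=v2" form and rejects
    // selectors that carry MatchExpressions, mirroring the factory behavior above.
    func mapBasedSelector(sel *unversioned.LabelSelector) (string, error) {
        if len(sel.MatchExpressions) > 0 {
            return "", fmt.Errorf("cannot convert match expressions %+v to a map-based selector", sel.MatchExpressions)
        }
        pairs := make([]string, 0, len(sel.MatchLabels))
        for k, v := range sel.MatchLabels {
            pairs = append(pairs, k+"="+v)
        }
        return strings.Join(pairs, ","), nil
    }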
@@ -447,11 +485,20 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
}
switch t := object.(type) {
case *api.ReplicationController:
return GetFirstPod(client, t.Namespace, t.Spec.Selector)
selector := labels.SelectorFromSet(t.Spec.Selector)
return GetFirstPod(client, t.Namespace, selector)
case *extensions.Deployment:
return GetFirstPod(client, t.Namespace, t.Spec.Selector)
selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to convert label selector to selector: %v", err)
}
return GetFirstPod(client, t.Namespace, selector)
case *extensions.Job:
return GetFirstPod(client, t.Namespace, t.Spec.Selector.MatchLabels)
selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to convert label selector to selector: %v", err)
}
return GetFirstPod(client, t.Namespace, selector)
case *api.Pod:
return t, nil
default:
@@ -469,12 +516,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
}

// GetFirstPod returns the first pod of an object from its namespace and selector
func GetFirstPod(client *client.Client, namespace string, selector map[string]string) (*api.Pod, error) {
func GetFirstPod(client *client.Client, namespace string, selector labels.Selector) (*api.Pod, error) {
var pods *api.PodList
for pods == nil || len(pods.Items) == 0 {
var err error
labelSelector := labels.SelectorFromSet(selector)
options := api.ListOptions{LabelSelector: labelSelector}
options := api.ListOptions{LabelSelector: selector}
if pods, err = client.Pods(namespace).List(options); err != nil {
return nil, err
}
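Because GetFirstPod now takes a labels.Selector instead of a raw label map, a caller handling the deployment case has to convert first. A hedged sketch of such a caller (the import paths and the cmdutil alias are assumptions about where this factory code lives; they are not shown in the diff):

    package example // illustrative only

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/unversioned"
        "k8s.io/kubernetes/pkg/apis/extensions"
        client "k8s.io/kubernetes/pkg/client/unversioned"
        cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
    )

    // firstPodForDeployment converts the set-based selector and hands the result
    // to the updated GetFirstPod signature shown above.
    func firstPodForDeployment(c *client.Client, d *extensions.Deployment) (*api.Pod, error) {
        selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
        if err != nil {
            return nil, fmt.Errorf("failed to convert label selector to selector: %v", err)
        }
        return cmdutil.GetFirstPod(c, d.Namespace, selector)
    }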
|
@ -1607,12 +1607,16 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return tabbedString(func(out io.Writer) error {
|
||||
fmt.Fprintf(out, "Name:\t%s\n", d.ObjectMeta.Name)
|
||||
fmt.Fprintf(out, "Namespace:\t%s\n", d.ObjectMeta.Namespace)
|
||||
fmt.Fprintf(out, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z))
|
||||
fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(d.Labels))
|
||||
fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(d.Spec.Selector))
|
||||
fmt.Fprintf(out, "Selector:\t%s\n", selector)
|
||||
fmt.Fprintf(out, "Replicas:\t%d updated | %d total | %d available | %d unavailable\n", d.Status.UpdatedReplicas, d.Spec.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas)
|
||||
fmt.Fprintf(out, "StrategyType:\t%s\n", d.Spec.Strategy.Type)
|
||||
fmt.Fprintf(out, "MinReadySeconds:\t%s\n", d.Spec.MinReadySeconds)
|
||||
@ -1620,17 +1624,17 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error)
|
||||
ru := d.Spec.Strategy.RollingUpdate
|
||||
fmt.Fprintf(out, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String())
|
||||
}
|
||||
oldRCs, _, err := deploymentutil.GetOldRCs(*d, dd)
|
||||
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*d, dd)
|
||||
if err == nil {
|
||||
fmt.Fprintf(out, "OldReplicationControllers:\t%s\n", printReplicationControllersByLabels(oldRCs))
|
||||
fmt.Fprintf(out, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
|
||||
}
|
||||
newRC, err := deploymentutil.GetNewRC(*d, dd)
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(*d, dd)
|
||||
if err == nil {
|
||||
var newRCs []*api.ReplicationController
|
||||
if newRC != nil {
|
||||
newRCs = append(newRCs, newRC)
|
||||
var newRSs []*extensions.ReplicaSet
|
||||
if newRS != nil {
|
||||
newRSs = append(newRSs, newRS)
|
||||
}
|
||||
fmt.Fprintf(out, "NewReplicationController:\t%s\n", printReplicationControllersByLabels(newRCs))
|
||||
fmt.Fprintf(out, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs))
|
||||
}
|
||||
events, err := dd.Core().Events(namespace).Search(d)
|
||||
if err == nil && events != nil {
|
||||
@ -1705,6 +1709,20 @@ func printReplicationControllersByLabels(matchingRCs []*api.ReplicationControlle
|
||||
return list
|
||||
}
|
||||
|
||||
func printReplicaSetsByLabels(matchingRSs []*extensions.ReplicaSet) string {
|
||||
// Format the matching ReplicaSets into strings.
|
||||
var rsStrings []string
|
||||
for _, rs := range matchingRSs {
|
||||
rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, rs.Spec.Replicas))
|
||||
}
|
||||
|
||||
list := strings.Join(rsStrings, ", ")
|
||||
if list == "" {
|
||||
return "<none>"
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func getPodStatusForController(c client.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) {
|
||||
options := api.ListOptions{LabelSelector: selector}
|
||||
rcPods, err := c.List(options)
|
||||
|
@@ -58,7 +58,7 @@ type DeploymentHistoryViewer struct {
c clientset.Interface
}

// History returns a revision-to-RC map as the revision history of a deployment
// History returns a revision-to-replicaset map as the revision history of a deployment
func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo, error) {
historyInfo := HistoryInfo{
RevisionToTemplate: make(map[int64]*api.PodTemplateSpec),
@@ -67,22 +67,22 @@ func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo,
if err != nil {
return historyInfo, fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
}
_, allOldRCs, err := deploymentutil.GetOldRCs(*deployment, h.c)
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(*deployment, h.c)
if err != nil {
return historyInfo, fmt.Errorf("failed to retrieve old RCs from deployment %s: %v", name, err)
return historyInfo, fmt.Errorf("failed to retrieve old replica sets from deployment %s: %v", name, err)
}
newRC, err := deploymentutil.GetNewRC(*deployment, h.c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, h.c)
if err != nil {
return historyInfo, fmt.Errorf("failed to retrieve new RC from deployment %s: %v", name, err)
return historyInfo, fmt.Errorf("failed to retrieve new replica set from deployment %s: %v", name, err)
}
allRCs := append(allOldRCs, newRC)
for _, rc := range allRCs {
v, err := deploymentutil.Revision(rc)
allRSs := append(allOldRSs, newRS)
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
continue
}
historyInfo.RevisionToTemplate[v] = rc.Spec.Template
changeCause := getChangeCause(rc)
historyInfo.RevisionToTemplate[v] = rs.Spec.Template
changeCause := getChangeCause(rs)
if historyInfo.RevisionToTemplate[v].Annotations == nil {
historyInfo.RevisionToTemplate[v].Annotations = make(map[string]string)
}
@ -103,7 +103,7 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime
		},
		Spec: extensions.DeploymentSpec{
			Replicas: count,
			Selector: labels,
			Selector: &unversioned.LabelSelector{MatchLabels: labels},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: labels,

@ -656,7 +656,7 @@ func TestGenerateDeployment(t *testing.T) {
			},
			Spec: extensions.DeploymentSpec{
				Replicas: 3,
				Selector: map[string]string{"foo": "bar", "baz": "blah"},
				Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar", "baz": "blah"}},
				Template: api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels: map[string]string{"foo": "bar", "baz": "blah"},
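The recurring edit in these generator and test hunks is the type of Spec.Selector: it moves from a plain label map to a pointer to a LabelSelector wrapping that map under MatchLabels. A minimal standalone sketch of the shape change, using local stand-in types rather than the real unversioned.LabelSelector:

package main

import "fmt"

// labelSelector is a local stand-in for unversioned.LabelSelector; the real
// type also carries MatchExpressions.
type labelSelector struct {
	MatchLabels map[string]string
}

type oldDeploymentSpec struct{ Selector map[string]string }
type newDeploymentSpec struct{ Selector *labelSelector }

func main() {
	labels := map[string]string{"foo": "bar", "baz": "blah"}
	before := oldDeploymentSpec{Selector: labels}
	after := newDeploymentSpec{Selector: &labelSelector{MatchLabels: labels}}
	fmt.Println(before.Selector["foo"], after.Selector.MatchLabels["foo"]) // bar bar
}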
@ -48,8 +48,9 @@ func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"):
		return &JobScaler{c.Extensions()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
	// case extensions.Kind("Deployment"):
	// return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
||||
@ -327,53 +328,57 @@ func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeploymentScaler struct {
|
||||
c client.ExtensionsInterface
|
||||
}
|
||||
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
|
||||
// type DeploymentScaler struct {
|
||||
// c client.ExtensionsInterface
|
||||
// }
|
||||
|
||||
// ScaleSimple is responsible for updating a deployment's desired replicas count.
|
||||
func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
|
||||
deployment, err := scaler.c.Deployments(namespace).Get(name)
|
||||
if err != nil {
|
||||
return ScaleError{ScaleGetFailure, "Unknown", err}
|
||||
}
|
||||
if preconditions != nil {
|
||||
if err := preconditions.ValidateDeployment(deployment); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
scale := extensions.ScaleFromDeployment(deployment)
|
||||
scale.Spec.Replicas = int(newSize)
|
||||
if _, err := scaler.c.Scales(namespace).Update("Deployment", scale); err != nil {
|
||||
if errors.IsInvalid(err) {
|
||||
return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
|
||||
}
|
||||
return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// // ScaleSimple is responsible for updating a deployment's desired replicas count.
|
||||
// func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
|
||||
// deployment, err := scaler.c.Deployments(namespace).Get(name)
|
||||
// if err != nil {
|
||||
// return ScaleError{ScaleGetFailure, "Unknown", err}
|
||||
// }
|
||||
// if preconditions != nil {
|
||||
// if err := preconditions.ValidateDeployment(deployment); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
// scale, err := extensions.ScaleFromDeployment(deployment)
|
||||
// if err != nil {
|
||||
// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
|
||||
// }
|
||||
// scale.Spec.Replicas = int(newSize)
|
||||
// if _, err := scaler.c.Scales(namespace).Update("Deployment", scale); err != nil {
|
||||
// if errors.IsInvalid(err) {
|
||||
// return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
|
||||
// }
|
||||
// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
|
||||
// }
|
||||
// return nil
|
||||
// }

// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		deployment, err := scaler.c.Deployments(namespace).Get(name)
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
			client.DeploymentHasDesiredReplicas(scaler.c, deployment))
	}
	return nil
}
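Both the removed Scale method above and its commented-out replacement lean on the same retry shape: build a condition with ScaleCondition, then poll it until it passes or the timeout expires. A minimal standalone sketch of that poll-until-condition pattern, using only the standard library as a simplified stand-in for wait.Poll (interval, timeout, and condition here are made up):

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll runs cond every interval until it reports done, it returns an error,
// or the timeout expires.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := poll(time.Millisecond, 50*time.Millisecond, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the scale converges on the third check
	})
	fmt.Println(attempts, err) // 3 <nil>
}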
|
||||
// // Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
|
||||
// // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
|
||||
// func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
|
||||
// if preconditions == nil {
|
||||
// preconditions = &ScalePrecondition{-1, ""}
|
||||
// }
|
||||
// if retry == nil {
|
||||
// // Make it try only once, immediately
|
||||
// retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
|
||||
// }
|
||||
// cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
|
||||
// if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if waitForReplicas != nil {
|
||||
// deployment, err := scaler.c.Deployments(namespace).Get(name)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
|
||||
// client.DeploymentHasDesiredReplicas(scaler.c, deployment))
|
||||
// }
|
||||
// return nil
|
||||
// }
|
||||
|
@ -488,143 +488,145 @@ func TestValidateJob(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type ErrorScales struct {
|
||||
testclient.FakeScales
|
||||
invalid bool
|
||||
}
|
||||
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
|
||||
|
||||
func (c *ErrorScales) Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) {
|
||||
if c.invalid {
|
||||
return nil, kerrors.NewInvalid(extensions.Kind(scale.Kind), scale.Name, nil)
|
||||
}
|
||||
return nil, errors.New("scale update failure")
|
||||
}
|
||||
// type ErrorScales struct {
|
||||
// testclient.FakeScales
|
||||
// invalid bool
|
||||
// }
|
||||
|
||||
func (c *ErrorScales) Get(kind, name string) (*extensions.Scale, error) {
|
||||
return &extensions.Scale{
|
||||
Spec: extensions.ScaleSpec{
|
||||
Replicas: 0,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
// func (c *ErrorScales) Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) {
|
||||
// if c.invalid {
|
||||
// return nil, kerrors.NewInvalid(extensions.Kind(scale.Kind), scale.Name, nil)
|
||||
// }
|
||||
// return nil, errors.New("scale update failure")
|
||||
// }
|
||||
|
||||
type ErrorDeployments struct {
|
||||
testclient.FakeDeployments
|
||||
invalid bool
|
||||
}
|
||||
// func (c *ErrorScales) Get(kind, name string) (*extensions.Scale, error) {
|
||||
// return &extensions.Scale{
|
||||
// Spec: extensions.ScaleSpec{
|
||||
// Replicas: 0,
|
||||
// },
|
||||
// }, nil
|
||||
// }
|
||||
|
||||
func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) {
|
||||
if c.invalid {
|
||||
return nil, kerrors.NewInvalid(extensions.Kind(deployment.Kind), deployment.Name, nil)
|
||||
}
|
||||
return nil, errors.New("deployment update failure")
|
||||
}
|
||||
// type ErrorDeployments struct {
|
||||
// testclient.FakeDeployments
|
||||
// invalid bool
|
||||
// }
|
||||
|
||||
func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) {
|
||||
return &extensions.Deployment{
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: 0,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
// func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) {
|
||||
// if c.invalid {
|
||||
// return nil, kerrors.NewInvalid(extensions.Kind(deployment.Kind), deployment.Name, nil)
|
||||
// }
|
||||
// return nil, errors.New("deployment update failure")
|
||||
// }
|
||||
|
||||
type ErrorDeploymentClient struct {
|
||||
testclient.FakeExperimental
|
||||
invalid bool
|
||||
}
|
||||
// func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) {
|
||||
// return &extensions.Deployment{
|
||||
// Spec: extensions.DeploymentSpec{
|
||||
// Replicas: 0,
|
||||
// },
|
||||
// }, nil
|
||||
// }
|
||||
|
||||
func (c *ErrorDeploymentClient) Deployments(namespace string) client.DeploymentInterface {
|
||||
return &ErrorDeployments{testclient.FakeDeployments{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
|
||||
}
|
||||
// type ErrorDeploymentClient struct {
|
||||
// testclient.FakeExperimental
|
||||
// invalid bool
|
||||
// }
|
||||
|
||||
func (c *ErrorDeploymentClient) Scales(namespace string) client.ScaleInterface {
|
||||
return &ErrorScales{testclient.FakeScales{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
|
||||
}
|
||||
// func (c *ErrorDeploymentClient) Deployments(namespace string) client.DeploymentInterface {
|
||||
// return &ErrorDeployments{testclient.FakeDeployments{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
|
||||
// }
|
||||
|
||||
func TestDeploymentScaleRetry(t *testing.T) {
|
||||
fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: false}
|
||||
scaler := &DeploymentScaler{fake}
|
||||
preconditions := &ScalePrecondition{-1, ""}
|
||||
count := uint(3)
|
||||
name := "foo"
|
||||
namespace := "default"
|
||||
// func (c *ErrorDeploymentClient) Scales(namespace string) client.ScaleInterface {
|
||||
// return &ErrorScales{testclient.FakeScales{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
|
||||
// }
|
||||
|
||||
scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count)
|
||||
pass, err := scaleFunc()
|
||||
if pass != false {
|
||||
t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Did not expect an error on update failure, got %v", err)
|
||||
}
|
||||
preconditions = &ScalePrecondition{3, ""}
|
||||
scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count)
|
||||
pass, err = scaleFunc()
|
||||
if err == nil {
|
||||
t.Errorf("Expected error on precondition failure")
|
||||
}
|
||||
}
|
||||
// func TestDeploymentScaleRetry(t *testing.T) {
|
||||
// fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: false}
|
||||
// scaler := &DeploymentScaler{fake}
|
||||
// preconditions := &ScalePrecondition{-1, ""}
|
||||
// count := uint(3)
|
||||
// name := "foo"
|
||||
// namespace := "default"
|
||||
|
||||
func TestDeploymentScale(t *testing.T) {
|
||||
fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
|
||||
scaler := DeploymentScaler{fake}
|
||||
preconditions := ScalePrecondition{-1, ""}
|
||||
count := uint(3)
|
||||
name := "foo"
|
||||
scaler.Scale("default", name, count, &preconditions, nil, nil)
|
||||
// scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count)
|
||||
// pass, err := scaleFunc()
|
||||
// if pass != false {
|
||||
// t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
|
||||
// }
|
||||
// if err != nil {
|
||||
// t.Errorf("Did not expect an error on update failure, got %v", err)
|
||||
// }
|
||||
// preconditions = &ScalePrecondition{3, ""}
|
||||
// scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count)
|
||||
// pass, err = scaleFunc()
|
||||
// if err == nil {
|
||||
// t.Errorf("Expected error on precondition failure")
|
||||
// }
|
||||
// }
|
||||
|
||||
actions := fake.Actions()
|
||||
if len(actions) != 2 {
|
||||
t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
|
||||
}
|
||||
if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
|
||||
t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
|
||||
}
|
||||
// TODO: The testclient needs to support subresources
|
||||
if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "Deployment" || action.GetObject().(*extensions.Scale).Spec.Replicas != int(count) {
|
||||
t.Errorf("unexpected action %v, expected update-deployment-scale with replicas = %d", actions[1], count)
|
||||
}
|
||||
}
|
||||
// func TestDeploymentScale(t *testing.T) {
|
||||
// fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
|
||||
// scaler := DeploymentScaler{fake}
|
||||
// preconditions := ScalePrecondition{-1, ""}
|
||||
// count := uint(3)
|
||||
// name := "foo"
|
||||
// scaler.Scale("default", name, count, &preconditions, nil, nil)
|
||||
|
||||
func TestDeploymentScaleInvalid(t *testing.T) {
|
||||
fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: true}
|
||||
scaler := DeploymentScaler{fake}
|
||||
preconditions := ScalePrecondition{-1, ""}
|
||||
count := uint(3)
|
||||
name := "foo"
|
||||
namespace := "default"
|
||||
// actions := fake.Actions()
|
||||
// if len(actions) != 2 {
|
||||
// t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
|
||||
// }
|
||||
// if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
|
||||
// t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
|
||||
// }
|
||||
// // TODO: The testclient needs to support subresources
|
||||
// if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "Deployment" || action.GetObject().(*extensions.Scale).Spec.Replicas != int(count) {
|
||||
// t.Errorf("unexpected action %v, expected update-deployment-scale with replicas = %d", actions[1], count)
|
||||
// }
|
||||
// }
|
||||
|
||||
scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
|
||||
pass, err := scaleFunc()
|
||||
if pass {
|
||||
t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
|
||||
}
|
||||
e, ok := err.(ScaleError)
|
||||
if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
|
||||
t.Errorf("Expected error on invalid update failure, got %v", err)
|
||||
}
|
||||
}
|
||||
// func TestDeploymentScaleInvalid(t *testing.T) {
|
||||
// fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: true}
|
||||
// scaler := DeploymentScaler{fake}
|
||||
// preconditions := ScalePrecondition{-1, ""}
|
||||
// count := uint(3)
|
||||
// name := "foo"
|
||||
// namespace := "default"
|
||||
|
||||
func TestDeploymentScaleFailsPreconditions(t *testing.T) {
|
||||
fake := testclient.NewSimpleFake(&extensions.Deployment{
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: 10,
|
||||
},
|
||||
})
|
||||
scaler := DeploymentScaler{&testclient.FakeExperimental{fake}}
|
||||
preconditions := ScalePrecondition{2, ""}
|
||||
count := uint(3)
|
||||
name := "foo"
|
||||
scaler.Scale("default", name, count, &preconditions, nil, nil)
|
||||
// scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
|
||||
// pass, err := scaleFunc()
|
||||
// if pass {
|
||||
// t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
|
||||
// }
|
||||
// e, ok := err.(ScaleError)
|
||||
// if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
|
||||
// t.Errorf("Expected error on invalid update failure, got %v", err)
|
||||
// }
|
||||
// }
|
||||
|
||||
actions := fake.Actions()
|
||||
if len(actions) != 1 {
|
||||
t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
|
||||
}
|
||||
if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
|
||||
t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
|
||||
}
|
||||
}
|
||||
// func TestDeploymentScaleFailsPreconditions(t *testing.T) {
|
||||
// fake := testclient.NewSimpleFake(&extensions.Deployment{
|
||||
// Spec: extensions.DeploymentSpec{
|
||||
// Replicas: 10,
|
||||
// },
|
||||
// })
|
||||
// scaler := DeploymentScaler{&testclient.FakeExperimental{fake}}
|
||||
// preconditions := ScalePrecondition{2, ""}
|
||||
// count := uint(3)
|
||||
// name := "foo"
|
||||
// scaler.Scale("default", name, count, &preconditions, nil, nil)
|
||||
|
||||
// actions := fake.Actions()
|
||||
// if len(actions) != 1 {
|
||||
// t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
|
||||
// }
|
||||
// if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
|
||||
// t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestValidateDeployment(t *testing.T) {
|
||||
zero, ten, twenty := 0, 10, 20
|
||||
|
@ -624,7 +624,8 @@ func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage {
		deploymentStorage := deploymentetcd.NewStorage(dbClient("deployments"), storageDecorator)
		storage["deployments"] = deploymentStorage.Deployment
		storage["deployments/status"] = deploymentStorage.Status
		storage["deployments/scale"] = deploymentStorage.Scale
		// TODO(madhusudancs): Install scale when Scale group issues are fixed (see issue #18528).
		// storage["deployments/scale"] = deploymentStorage.Scale
		storage["deployments/rollback"] = deploymentStorage.Rollback
	}
	if isEnabled("jobs") {
@ -183,43 +183,53 @@ type ScaleREST struct {
|
||||
registry *deployment.Registry
|
||||
}
|
||||
|
||||
// ScaleREST implements Patcher
|
||||
var _ = rest.Patcher(&ScaleREST{})
|
||||
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
|
||||
|
||||
// New creates a new Scale object
|
||||
func (r *ScaleREST) New() runtime.Object {
|
||||
return &extensions.Scale{}
|
||||
}
|
||||
// // ScaleREST implements Patcher
|
||||
// var _ = rest.Patcher(&ScaleREST{})
|
||||
|
||||
func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) {
|
||||
deployment, err := (*r.registry).GetDeployment(ctx, name)
|
||||
if err != nil {
|
||||
return nil, errors.NewNotFound(extensions.Resource("deployments/scale"), name)
|
||||
}
|
||||
return extensions.ScaleFromDeployment(deployment), nil
|
||||
}
|
||||
// // New creates a new Scale object
|
||||
// func (r *ScaleREST) New() runtime.Object {
|
||||
// return &extensions.Scale{}
|
||||
// }
|
||||
|
||||
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
|
||||
if obj == nil {
|
||||
return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale"))
|
||||
}
|
||||
scale, ok := obj.(*extensions.Scale)
|
||||
if !ok {
|
||||
return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
|
||||
}
|
||||
// func (r *ScaleREST) Get(ctx api.Context, name string) (runtime.Object, error) {
|
||||
// deployment, err := (*r.registry).GetDeployment(ctx, name)
|
||||
// if err != nil {
|
||||
// return nil, errors.NewNotFound(extensions.Resource("deployments/scale"), name)
|
||||
// }
|
||||
// scale, err := extensions.ScaleFromDeployment(deployment)
|
||||
// if err != nil {
|
||||
// return nil, errors.NewBadRequest(fmt.Sprintf("%v", err))
|
||||
// }
|
||||
// return scale, nil
|
||||
// }
|
||||
|
||||
if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
|
||||
return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
|
||||
}
|
||||
// func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
|
||||
// if obj == nil {
|
||||
// return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale"))
|
||||
// }
|
||||
// scale, ok := obj.(*extensions.Scale)
|
||||
// if !ok {
|
||||
// return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
|
||||
// }
|
||||
|
||||
deployment, err := (*r.registry).GetDeployment(ctx, scale.Name)
|
||||
if err != nil {
|
||||
return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), scale.Name)
|
||||
}
|
||||
deployment.Spec.Replicas = scale.Spec.Replicas
|
||||
deployment, err = (*r.registry).UpdateDeployment(ctx, deployment)
|
||||
if err != nil {
|
||||
return nil, false, errors.NewConflict(extensions.Resource("deployments/scale"), scale.Name, err)
|
||||
}
|
||||
return extensions.ScaleFromDeployment(deployment), false, nil
|
||||
}
|
||||
// if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
|
||||
// return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
|
||||
// }
|
||||
|
||||
// deployment, err := (*r.registry).GetDeployment(ctx, scale.Name)
|
||||
// if err != nil {
|
||||
// return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), scale.Name)
|
||||
// }
|
||||
// deployment.Spec.Replicas = scale.Spec.Replicas
|
||||
// deployment, err = (*r.registry).UpdateDeployment(ctx, deployment)
|
||||
// if err != nil {
|
||||
// return nil, false, errors.NewConflict(extensions.Resource("deployments/scale"), scale.Name, err)
|
||||
// }
|
||||
// newScale, err := extensions.ScaleFromDeployment(deployment)
|
||||
// if err != nil {
|
||||
// return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
|
||||
// }
|
||||
// return newScale, false, nil
|
||||
// }
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
etcderrors "k8s.io/kubernetes/pkg/api/errors/etcd"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
@ -31,7 +32,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
|
||||
etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
)
|
||||
|
||||
func newStorage(t *testing.T) (*DeploymentStorage, *etcdtesting.EtcdTestServer) {
|
||||
@ -50,7 +50,7 @@ func validNewDeployment() *extensions.Deployment {
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Selector: map[string]string{"a": "b"},
|
||||
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"a": "b"}},
|
||||
Template: api.PodTemplateSpec{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: map[string]string{"a": "b"},
|
||||
@ -89,7 +89,7 @@ func TestCreate(t *testing.T) {
|
||||
// invalid (invalid selector)
|
||||
&extensions.Deployment{
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Selector: map[string]string{},
|
||||
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{}},
|
||||
Template: validDeployment.Spec.Template,
|
||||
},
|
||||
},
|
||||
@ -127,7 +127,7 @@ func TestUpdate(t *testing.T) {
|
||||
},
|
||||
func(obj runtime.Object) runtime.Object {
|
||||
object := obj.(*extensions.Deployment)
|
||||
object.Spec.Selector = map[string]string{}
|
||||
object.Spec.Selector = &unversioned.LabelSelector{MatchLabels: map[string]string{}}
|
||||
return object
|
||||
},
|
||||
)
|
||||
@ -179,71 +179,73 @@ func TestWatch(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func validNewScale() *extensions.Scale {
|
||||
return &extensions.Scale{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
|
||||
Spec: extensions.ScaleSpec{
|
||||
Replicas: validDeployment.Spec.Replicas,
|
||||
},
|
||||
Status: extensions.ScaleStatus{
|
||||
Replicas: validDeployment.Status.Replicas,
|
||||
Selector: validDeployment.Spec.Template.Labels,
|
||||
},
|
||||
}
|
||||
}
|
||||
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
|
||||
|
||||
var validScale = *validNewScale()
|
||||
// func validNewScale() *extensions.Scale {
|
||||
// return &extensions.Scale{
|
||||
// ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
|
||||
// Spec: extensions.ScaleSpec{
|
||||
// Replicas: validDeployment.Spec.Replicas,
|
||||
// },
|
||||
// Status: extensions.ScaleStatus{
|
||||
// Replicas: validDeployment.Status.Replicas,
|
||||
// Selector: validDeployment.Spec.Template.Labels,
|
||||
// },
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScaleGet(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
// var validScale = *validNewScale()
|
||||
|
||||
ctx := api.WithNamespace(api.NewContext(), namespace)
|
||||
key := etcdtest.AddPrefix("/deployments/" + namespace + "/" + name)
|
||||
if err := storage.Deployment.Storage.Set(ctx, key, &validDeployment, nil, 0); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// func TestScaleGet(t *testing.T) {
|
||||
// storage, server := newStorage(t)
|
||||
// defer server.Terminate(t)
|
||||
|
||||
expect := &validScale
|
||||
obj, err := storage.Scale.Get(ctx, name)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
scale := obj.(*extensions.Scale)
|
||||
if e, a := expect, scale; !api.Semantic.DeepDerivative(e, a) {
|
||||
t.Errorf("unexpected scale: %s", util.ObjectDiff(e, a))
|
||||
}
|
||||
}
|
||||
// ctx := api.WithNamespace(api.NewContext(), namespace)
|
||||
// key := etcdtest.AddPrefix("/deployments/" + namespace + "/" + name)
|
||||
// if err := storage.Deployment.Storage.Set(ctx, key, &validDeployment, nil, 0); err != nil {
|
||||
// t.Fatalf("unexpected error: %v", err)
|
||||
// }
|
||||
|
||||
func TestScaleUpdate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
// expect := &validScale
|
||||
// obj, err := storage.Scale.Get(ctx, name)
|
||||
// if err != nil {
|
||||
// t.Fatalf("unexpected error: %v", err)
|
||||
// }
|
||||
// scale := obj.(*extensions.Scale)
|
||||
// if e, a := expect, scale; !api.Semantic.DeepDerivative(e, a) {
|
||||
// t.Errorf("unexpected scale: %s", util.ObjectDiff(e, a))
|
||||
// }
|
||||
// }
|
||||
|
||||
ctx := api.WithNamespace(api.NewContext(), namespace)
|
||||
key := etcdtest.AddPrefix("/deployments/" + namespace + "/" + name)
|
||||
if err := storage.Deployment.Storage.Set(ctx, key, &validDeployment, nil, 0); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
replicas := 12
|
||||
update := extensions.Scale{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
|
||||
Spec: extensions.ScaleSpec{
|
||||
Replicas: replicas,
|
||||
},
|
||||
}
|
||||
// func TestScaleUpdate(t *testing.T) {
|
||||
// storage, server := newStorage(t)
|
||||
// defer server.Terminate(t)
|
||||
|
||||
if _, _, err := storage.Scale.Update(ctx, &update); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
obj, err := storage.Deployment.Get(ctx, name)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
deployment := obj.(*extensions.Deployment)
|
||||
if deployment.Spec.Replicas != replicas {
|
||||
t.Errorf("wrong replicas count expected: %d got: %d", replicas, deployment.Spec.Replicas)
|
||||
}
|
||||
}
|
||||
// ctx := api.WithNamespace(api.NewContext(), namespace)
|
||||
// key := etcdtest.AddPrefix("/deployments/" + namespace + "/" + name)
|
||||
// if err := storage.Deployment.Storage.Set(ctx, key, &validDeployment, nil, 0); err != nil {
|
||||
// t.Fatalf("unexpected error: %v", err)
|
||||
// }
|
||||
// replicas := 12
|
||||
// update := extensions.Scale{
|
||||
// ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
|
||||
// Spec: extensions.ScaleSpec{
|
||||
// Replicas: replicas,
|
||||
// },
|
||||
// }
|
||||
|
||||
// if _, _, err := storage.Scale.Update(ctx, &update); err != nil {
|
||||
// t.Fatalf("unexpected error: %v", err)
|
||||
// }
|
||||
// obj, err := storage.Deployment.Get(ctx, name)
|
||||
// if err != nil {
|
||||
// t.Fatalf("unexpected error: %v", err)
|
||||
// }
|
||||
// deployment := obj.(*extensions.Deployment)
|
||||
// if deployment.Spec.Replicas != replicas {
|
||||
// t.Errorf("wrong replicas count expected: %d got: %d", replicas, deployment.Spec.Replicas)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestStatusUpdate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
@ -30,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// The revision annotation of a deployment's replication controllers which records its rollout sequence
|
||||
// The revision annotation of a deployment's replica sets which records its rollout sequence
|
||||
RevisionAnnotation = "deployment.kubernetes.io/revision"
|
||||
|
||||
// Here are the possible rollback event reasons
|
||||
@ -39,112 +40,124 @@ const (
|
||||
RollbackDone = "DeploymentRollback"
|
||||
)
|
||||
|
||||
// GetOldRCs returns the old RCs targeted by the given Deployment; get PodList and RCList from client interface.
|
||||
// Note that the first set of old RCs doesn't include the ones with no pods, and the second set of old RCs include all old RCs.
|
||||
func GetOldRCs(deployment extensions.Deployment, c clientset.Interface) ([]*api.ReplicationController, []*api.ReplicationController, error) {
|
||||
return GetOldRCsFromLists(deployment, c,
|
||||
// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
|
||||
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
|
||||
func GetOldReplicaSets(deployment extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
|
||||
return GetOldReplicaSetsFromLists(deployment, c,
|
||||
func(namespace string, options api.ListOptions) (*api.PodList, error) {
|
||||
return c.Core().Pods(namespace).List(options)
|
||||
},
|
||||
func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
|
||||
rcList, err := c.Core().ReplicationControllers(namespace).List(options)
|
||||
return rcList.Items, err
|
||||
func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) {
|
||||
rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
|
||||
return rsList.Items, err
|
||||
})
|
||||
}
|
||||
|
||||
// GetOldRCsFromLists returns two sets of old RCs targeted by the given Deployment; get PodList and RCList with input functions.
|
||||
// Note that the first set of old RCs doesn't include the ones with no pods, and the second set of old RCs include all old RCs.
|
||||
func GetOldRCsFromLists(deployment extensions.Deployment, c clientset.Interface, getPodList func(string, api.ListOptions) (*api.PodList, error), getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) ([]*api.ReplicationController, []*api.ReplicationController, error) {
|
||||
// GetOldReplicaSetsFromLists returns two sets of old replica sets targeted by the given Deployment; get PodList and ReplicaSetList with input functions.
|
||||
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
|
||||
func GetOldReplicaSetsFromLists(deployment extensions.Deployment, c clientset.Interface, getPodList func(string, api.ListOptions) (*api.PodList, error), getRSList func(string, api.ListOptions) ([]extensions.ReplicaSet, error)) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
|
||||
namespace := deployment.ObjectMeta.Namespace
|
||||
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to convert LabelSelector to Selector: %v", err)
|
||||
}
|
||||
|
||||
// 1. Find all pods whose labels match deployment.Spec.Selector
|
||||
selector := labels.SelectorFromSet(deployment.Spec.Selector)
|
||||
options := api.ListOptions{LabelSelector: selector}
|
||||
podList, err := getPodList(namespace, options)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error listing pods: %v", err)
|
||||
}
|
||||
// 2. Find the corresponding RCs for pods in podList.
|
||||
// TODO: Right now we list all RCs and then filter. We should add an API for this.
|
||||
oldRCs := map[string]api.ReplicationController{}
|
||||
allOldRCs := map[string]api.ReplicationController{}
|
||||
rcList, err := getRcList(namespace, options)
|
||||
// 2. Find the corresponding replica sets for pods in podList.
|
||||
// TODO: Right now we list all replica sets and then filter. We should add an API for this.
|
||||
oldRSs := map[string]extensions.ReplicaSet{}
|
||||
allOldRSs := map[string]extensions.ReplicaSet{}
|
||||
rsList, err := getRSList(namespace, options)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error listing replication controllers: %v", err)
|
||||
return nil, nil, fmt.Errorf("error listing replica sets: %v", err)
|
||||
}
|
||||
newRCTemplate := GetNewRCTemplate(deployment)
|
||||
newRSTemplate := GetNewReplicaSetTemplate(deployment)
|
||||
for _, pod := range podList.Items {
|
||||
podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
|
||||
for _, rc := range rcList {
|
||||
rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
|
||||
// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
|
||||
if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
|
||||
for _, rs := range rsList {
|
||||
rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to convert LabelSelector to Selector: %v", err)
|
||||
}
|
||||
// Filter out replica set that has the same pod template spec as the deployment - that is the new replica set.
|
||||
if api.Semantic.DeepEqual(rs.Spec.Template, &newRSTemplate) {
|
||||
continue
|
||||
}
|
||||
allOldRCs[rc.ObjectMeta.Name] = rc
|
||||
if rcLabelsSelector.Matches(podLabelsSelector) {
|
||||
oldRCs[rc.ObjectMeta.Name] = rc
|
||||
allOldRSs[rs.ObjectMeta.Name] = rs
|
||||
if rsLabelsSelector.Matches(podLabelsSelector) {
|
||||
oldRSs[rs.ObjectMeta.Name] = rs
|
||||
}
|
||||
}
|
||||
}
|
||||
requiredRCs := []*api.ReplicationController{}
|
||||
for key := range oldRCs {
|
||||
value := oldRCs[key]
|
||||
requiredRCs = append(requiredRCs, &value)
|
||||
requiredRSs := []*extensions.ReplicaSet{}
|
||||
for key := range oldRSs {
|
||||
value := oldRSs[key]
|
||||
requiredRSs = append(requiredRSs, &value)
|
||||
}
|
||||
allRCs := []*api.ReplicationController{}
|
||||
for key := range allOldRCs {
|
||||
value := allOldRCs[key]
|
||||
allRCs = append(allRCs, &value)
|
||||
allRSs := []*extensions.ReplicaSet{}
|
||||
for key := range allOldRSs {
|
||||
value := allOldRSs[key]
|
||||
allRSs = append(allRSs, &value)
|
||||
}
|
||||
return requiredRCs, allRCs, nil
|
||||
return requiredRSs, allRSs, nil
|
||||
}
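The filtering step above turns each ReplicaSet's LabelSelector into a labels.Selector and asks whether it matches a pod's labels. As a rough standalone illustration of what a MatchLabels-only selector means (the real conversion goes through unversioned.LabelSelectorAsSelector and also handles MatchExpressions; the labels below are made up):

package main

import "fmt"

// matches sketches MatchLabels semantics: every key/value in the selector must
// be present, with the same value, in the pod's labels.
func matches(matchLabels, podLabels map[string]string) bool {
	for k, v := range matchLabels {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	rsSelector := map[string]string{"name": "nginx"}                             // hypothetical ReplicaSet selector
	pod := map[string]string{"name": "nginx", "pod-template-hash": "1234567890"} // hypothetical pod labels
	fmt.Println(matches(rsSelector, pod)) // true: this pod belongs to the ReplicaSet
}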
|
||||
|
||||
// GetNewRC returns an RC that matches the intent of the given deployment; get RCList from client interface.
|
||||
// Returns nil if the new RC doesnt exist yet.
|
||||
func GetNewRC(deployment extensions.Deployment, c clientset.Interface) (*api.ReplicationController, error) {
|
||||
return GetNewRCFromList(deployment, c,
|
||||
func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
|
||||
rcList, err := c.Core().ReplicationControllers(namespace).List(options)
|
||||
return rcList.Items, err
|
||||
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
|
||||
// Returns nil if the new replica set doesnt exist yet.
|
||||
func GetNewReplicaSet(deployment extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) {
|
||||
return GetNewReplicaSetFromList(deployment, c,
|
||||
func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) {
|
||||
rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
|
||||
return rsList.Items, err
|
||||
})
|
||||
}
|
||||
|
||||
// GetNewRCFromList returns an RC that matches the intent of the given deployment; get RCList with the input function.
|
||||
// Returns nil if the new RC doesnt exist yet.
|
||||
func GetNewRCFromList(deployment extensions.Deployment, c clientset.Interface, getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) (*api.ReplicationController, error) {
|
||||
// GetNewReplicaSetFromList returns a replica set that matches the intent of the given deployment; get ReplicaSetList with the input function.
|
||||
// Returns nil if the new replica set doesnt exist yet.
|
||||
func GetNewReplicaSetFromList(deployment extensions.Deployment, c clientset.Interface, getRSList func(string, api.ListOptions) ([]extensions.ReplicaSet, error)) (*extensions.ReplicaSet, error) {
|
||||
namespace := deployment.ObjectMeta.Namespace
|
||||
rcList, err := getRcList(namespace, api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)})
|
||||
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing replication controllers: %v", err)
|
||||
return nil, fmt.Errorf("failed to convert LabelSelector to Selector: %v", err)
|
||||
}
|
||||
newRCTemplate := GetNewRCTemplate(deployment)
|
||||
|
||||
for i := range rcList {
|
||||
if api.Semantic.DeepEqual(rcList[i].Spec.Template, &newRCTemplate) {
|
||||
// This is the new RC.
|
||||
return &rcList[i], nil
|
||||
rsList, err := getRSList(namespace, api.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing ReplicaSets: %v", err)
|
||||
}
|
||||
newRSTemplate := GetNewReplicaSetTemplate(deployment)
|
||||
|
||||
for i := range rsList {
|
||||
if api.Semantic.DeepEqual(rsList[i].Spec.Template, &newRSTemplate) {
|
||||
// This is the new ReplicaSet.
|
||||
return &rsList[i], nil
|
||||
}
|
||||
}
|
||||
// new RC does not exist.
|
||||
// new ReplicaSet does not exist.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Returns the desired PodTemplateSpec for the new RC corresponding to the given RC.
|
||||
func GetNewRCTemplate(deployment extensions.Deployment) api.PodTemplateSpec {
|
||||
// newRC will have the same template as in deployment spec, plus a unique label in some cases.
|
||||
newRCTemplate := api.PodTemplateSpec{
|
||||
// Returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
|
||||
func GetNewReplicaSetTemplate(deployment extensions.Deployment) api.PodTemplateSpec {
|
||||
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
|
||||
newRSTemplate := api.PodTemplateSpec{
|
||||
ObjectMeta: deployment.Spec.Template.ObjectMeta,
|
||||
Spec: deployment.Spec.Template.Spec,
|
||||
}
|
||||
newRCTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
|
||||
newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
|
||||
deployment.Spec.Template.ObjectMeta.Labels,
|
||||
extensions.DefaultDeploymentUniqueLabelKey,
|
||||
podutil.GetPodTemplateSpecHash(newRCTemplate))
|
||||
return newRCTemplate
|
||||
podutil.GetPodTemplateSpecHash(newRSTemplate))
|
||||
return newRSTemplate
|
||||
}
|
||||
|
||||
// SetTemplate sets the desired PodTemplateSpec from an RC template to the given deployment.
|
||||
func SetFromRCTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment {
|
||||
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
|
||||
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment {
|
||||
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
|
||||
deployment.Spec.Template.Spec = template.Spec
|
||||
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
|
||||
@ -153,18 +166,18 @@ func SetFromRCTemplate(deployment *extensions.Deployment, template api.PodTempla
|
||||
return deployment
|
||||
}
|
||||
|
||||
// Returns the sum of Replicas of the given replication controllers.
|
||||
func GetReplicaCountForRCs(replicationControllers []*api.ReplicationController) int {
|
||||
// Returns the sum of Replicas of the given replica sets.
|
||||
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
|
||||
totalReplicaCount := 0
|
||||
for _, rc := range replicationControllers {
|
||||
totalReplicaCount += rc.Spec.Replicas
|
||||
for _, rs := range replicaSets {
|
||||
totalReplicaCount += rs.Spec.Replicas
|
||||
}
|
||||
return totalReplicaCount
|
||||
}
|
||||
|
||||
// Returns the number of available pods corresponding to the given RCs.
|
||||
func GetAvailablePodsForRCs(c clientset.Interface, rcs []*api.ReplicationController, minReadySeconds int) (int, error) {
|
||||
allPods, err := getPodsForRCs(c, rcs)
|
||||
// Returns the number of available pods corresponding to the given replica sets.
|
||||
func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) {
|
||||
allPods, err := getPodsForReplicaSets(c, rss)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -195,12 +208,15 @@ func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
|
||||
return readyPodCount
|
||||
}
|
||||
|
||||
func getPodsForRCs(c clientset.Interface, replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
|
||||
func getPodsForReplicaSets(c clientset.Interface, replicaSets []*extensions.ReplicaSet) ([]api.Pod, error) {
|
||||
allPods := []api.Pod{}
|
||||
for _, rc := range replicationControllers {
|
||||
selector := labels.SelectorFromSet(rc.Spec.Selector)
|
||||
for _, rs := range replicaSets {
|
||||
selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert LabelSelector to Selector: %v", err)
|
||||
}
|
||||
options := api.ListOptions{LabelSelector: selector}
|
||||
podList, err := c.Core().Pods(rc.ObjectMeta.Namespace).List(options)
|
||||
podList, err := c.Core().Pods(rs.ObjectMeta.Namespace).List(options)
|
||||
if err != nil {
|
||||
return allPods, fmt.Errorf("error listing pods: %v", err)
|
||||
}
@ -209,9 +225,9 @@ func getPodsForRCs(c clientset.Interface, replicationControllers []*api.Replicat
	return allPods, nil
}

// Revision returns the revision number of the input RC
func Revision(rc *api.ReplicationController) (int64, error) {
	v, ok := rc.Annotations[RevisionAnnotation]
// Revision returns the revision number of the input replica set
func Revision(rs *extensions.ReplicaSet) (int64, error) {
	v, ok := rs.Annotations[RevisionAnnotation]
	if !ok {
		return 0, nil
	}
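The hunk is cut off here, but the idea of Revision is simply to read the RevisionAnnotation value off the object and interpret it as a number. A standalone sketch of that lookup-and-parse step, assuming the annotation stores a base-10 integer (the map literal is an example):

package main

import (
	"fmt"
	"strconv"
)

// revisionFromAnnotations sketches the lookup Revision performs: a missing
// annotation means revision 0; otherwise the value is parsed as an integer.
func revisionFromAnnotations(annotations map[string]string) (int64, error) {
	v, ok := annotations["deployment.kubernetes.io/revision"]
	if !ok {
		return 0, nil
	}
	return strconv.ParseInt(v, 10, 64)
}

func main() {
	rev, err := revisionFromAnnotations(map[string]string{
		"deployment.kubernetes.io/revision": "3",
	})
	fmt.Println(rev, err) // 3 <nil>
}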
|
||||
|
@ -82,13 +82,13 @@ func TestGetReadyPodsCount(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// generatePodFromRC creates a pod, with the input rc's selector and its template
|
||||
func generatePodFromRC(rc api.ReplicationController) api.Pod {
|
||||
// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
|
||||
func generatePodFromRS(rs extensions.ReplicaSet) api.Pod {
|
||||
return api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: rc.Spec.Selector,
|
||||
Labels: rs.Labels,
|
||||
},
|
||||
Spec: rc.Spec.Template.Spec,
|
||||
Spec: rs.Spec.Template.Spec,
|
||||
}
|
||||
}
|
||||
|
||||
@ -110,15 +110,15 @@ func generatePod(labels map[string]string, image string) api.Pod {
|
||||
}
|
||||
}
|
||||
|
||||
func generateRCWithLabel(labels map[string]string, image string) api.ReplicationController {
|
||||
return api.ReplicationController{
|
||||
func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
|
||||
return extensions.ReplicaSet{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: api.SimpleNameGenerator.GenerateName("rc"),
|
||||
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: api.ReplicationControllerSpec{
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Replicas: 1,
|
||||
Selector: labels,
|
||||
Selector: &unversioned.LabelSelector{MatchLabels: labels},
|
||||
Template: &api.PodTemplateSpec{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
@ -135,17 +135,17 @@ func generateRCWithLabel(labels map[string]string, image string) api.Replication
|
||||
}
|
||||
}
|
||||
|
||||
// generateRC creates a replication controller, with the input deployment's template as its template
|
||||
func generateRC(deployment extensions.Deployment) api.ReplicationController {
|
||||
template := GetNewRCTemplate(deployment)
|
||||
return api.ReplicationController{
|
||||
// generateRS creates a replica set, with the input deployment's template as its template
|
||||
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
|
||||
template := GetNewReplicaSetTemplate(deployment)
|
||||
return extensions.ReplicaSet{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: api.SimpleNameGenerator.GenerateName("rc"),
|
||||
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
|
||||
Labels: template.Labels,
|
||||
},
|
||||
Spec: api.ReplicationControllerSpec{
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Template: &template,
|
||||
Selector: template.Labels,
|
||||
Selector: &unversioned.LabelSelector{MatchLabels: template.Labels},
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -160,7 +160,7 @@ func generateDeployment(image string) extensions.Deployment {
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Selector: podLabels,
|
||||
Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
|
||||
Template: api.PodTemplateSpec{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
@ -186,32 +186,32 @@ func generateDeployment(image string) extensions.Deployment {
|
||||
|
||||
func TestGetNewRC(t *testing.T) {
|
||||
newDeployment := generateDeployment("nginx")
|
||||
newRC := generateRC(newDeployment)
|
||||
newRC := generateRS(newDeployment)
|
||||
|
||||
tests := []struct {
|
||||
test string
|
||||
rcList api.ReplicationControllerList
|
||||
expected *api.ReplicationController
|
||||
rsList extensions.ReplicaSetList
|
||||
expected *extensions.ReplicaSet
|
||||
}{
|
||||
{
|
||||
"No new RC",
|
||||
api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
generateRC(generateDeployment("foo")),
|
||||
generateRC(generateDeployment("bar")),
|
||||
"No new ReplicaSet",
|
||||
extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
generateRS(generateDeployment("bar")),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"Has new RC",
|
||||
api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
generateRC(generateDeployment("foo")),
|
||||
generateRC(generateDeployment("bar")),
|
||||
generateRC(generateDeployment("abc")),
|
||||
"Has new ReplicaSet",
|
||||
extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
generateRS(generateDeployment("bar")),
|
||||
generateRS(generateDeployment("abc")),
|
||||
newRC,
|
||||
generateRC(generateDeployment("xyz")),
|
||||
generateRS(generateDeployment("xyz")),
|
||||
},
|
||||
},
|
||||
&newRC,
|
||||
@ -223,69 +223,69 @@ func TestGetNewRC(t *testing.T) {
|
||||
c := &simple.Client{
|
||||
Request: simple.Request{
|
||||
Method: "GET",
|
||||
Path: testapi.Default.ResourcePath("replicationControllers", ns, ""),
|
||||
Path: testapi.Default.ResourcePath("replicaSets", ns, ""),
|
||||
},
|
||||
Response: simple.Response{
|
||||
StatusCode: 200,
|
||||
Body: &test.rcList,
|
||||
Body: &test.rsList,
|
||||
},
|
||||
}
|
||||
rc, err := GetNewRC(newDeployment, c.Setup(t).Clientset)
|
||||
rs, err := GetNewReplicaSet(newDeployment, c.Setup(t).Clientset)
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
|
||||
}
|
||||
if !api.Semantic.DeepEqual(rc, test.expected) {
|
||||
t.Errorf("In test case %s, expected %+v, got %+v", test.test, test.expected, rc)
|
||||
if !api.Semantic.DeepEqual(rs, test.expected) {
|
||||
t.Errorf("In test case %s, expected %+v, got %+v", test.test, test.expected, rs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetOldRCs(t *testing.T) {
|
||||
newDeployment := generateDeployment("nginx")
|
||||
newRC := generateRC(newDeployment)
|
||||
newPod := generatePodFromRC(newRC)
|
||||
newRS := generateRS(newDeployment)
|
||||
newPod := generatePodFromRS(newRS)
|
||||
|
||||
// create 2 old deployments and related rcs/pods, with the same labels but different template
|
||||
// create 2 old deployments and related replica sets/pods, with the same labels but different template
|
||||
oldDeployment := generateDeployment("nginx")
|
||||
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
|
||||
oldRC := generateRC(oldDeployment)
|
||||
oldPod := generatePodFromRC(oldRC)
|
||||
oldRS := generateRS(oldDeployment)
|
||||
oldPod := generatePodFromRS(oldRS)
|
||||
oldDeployment2 := generateDeployment("nginx")
|
||||
oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2"
|
||||
oldRC2 := generateRC(oldDeployment2)
|
||||
oldPod2 := generatePodFromRC(oldRC2)
|
||||
oldRS2 := generateRS(oldDeployment2)
|
||||
oldPod2 := generatePodFromRS(oldRS2)
|
||||
|
||||
// create 1 rc that existed before the deployment, with the same labels as the deployment
|
||||
existedPod := generatePod(newDeployment.Spec.Selector, "foo")
|
||||
existedRC := generateRCWithLabel(newDeployment.Spec.Selector, "foo")
|
||||
// create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment
|
||||
existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo")
|
||||
existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo")
|
||||
|
||||
tests := []struct {
|
||||
test string
|
||||
objs []runtime.Object
|
||||
expected []*api.ReplicationController
|
||||
expected []*extensions.ReplicaSet
|
||||
}{
|
||||
{
|
||||
"No old RCs",
|
||||
"No old ReplicaSets",
|
||||
[]runtime.Object{
|
||||
&api.PodList{
|
||||
Items: []api.Pod{
|
||||
generatePod(newDeployment.Spec.Selector, "foo"),
|
||||
generatePod(newDeployment.Spec.Selector, "bar"),
|
||||
generatePod(newDeployment.Spec.Template.Labels, "foo"),
|
||||
generatePod(newDeployment.Spec.Template.Labels, "bar"),
|
||||
newPod,
|
||||
},
|
||||
},
|
||||
&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
generateRC(generateDeployment("foo")),
|
||||
newRC,
|
||||
generateRC(generateDeployment("bar")),
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
generateRS(generateDeployment("foo")),
|
||||
newRS,
|
||||
generateRS(generateDeployment("bar")),
|
||||
},
|
||||
},
|
||||
},
|
||||
[]*api.ReplicationController{},
|
||||
[]*extensions.ReplicaSet{},
|
||||
},
|
||||
{
|
||||
"Has old RC",
|
||||
"Has old ReplicaSet",
|
||||
[]runtime.Object{
|
||||
&api.PodList{
|
||||
Items: []api.Pod{
|
||||
@ -294,51 +294,51 @@ func TestGetOldRCs(t *testing.T) {
|
||||
generatePod(map[string]string{"name": "bar"}, "bar"),
|
||||
generatePod(map[string]string{"name": "xyz"}, "xyz"),
|
||||
existedPod,
|
||||
generatePod(newDeployment.Spec.Selector, "abc"),
|
||||
generatePod(newDeployment.Spec.Template.Labels, "abc"),
|
||||
},
|
||||
},
|
||||
&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
oldRC2,
|
||||
oldRC,
|
||||
existedRC,
|
||||
newRC,
|
||||
generateRCWithLabel(map[string]string{"name": "xyz"}, "xyz"),
|
||||
generateRCWithLabel(map[string]string{"name": "bar"}, "bar"),
|
||||
&extensions.ReplicaSetList{
|
||||
Items: []extensions.ReplicaSet{
|
||||
oldRS2,
|
||||
oldRS,
|
||||
existedRS,
|
||||
newRS,
|
||||
generateRSWithLabel(map[string]string{"name": "xyz"}, "xyz"),
|
||||
generateRSWithLabel(map[string]string{"name": "bar"}, "bar"),
|
||||
},
|
||||
},
|
||||
},
|
||||
[]*api.ReplicationController{&oldRC, &oldRC2, &existedRC},
|
||||
[]*extensions.ReplicaSet{&oldRS, &oldRS2, &existedRS},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
rcs, _, err := GetOldRCs(newDeployment, fake.NewSimpleClientset(test.objs...))
|
||||
rss, _, err := GetOldReplicaSets(newDeployment, fake.NewSimpleClientset(test.objs...))
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
|
||||
}
|
||||
if !equal(rcs, test.expected) {
|
||||
t.Errorf("In test case %q, expected %v, got %v", test.test, test.expected, rcs)
|
||||
if !equal(rss, test.expected) {
|
||||
t.Errorf("In test case %q, expected %v, got %v", test.test, test.expected, rss)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// equal compares the equality of two rc slices regardless of their ordering
|
||||
func equal(rcs1, rcs2 []*api.ReplicationController) bool {
|
||||
if reflect.DeepEqual(rcs1, rcs2) {
|
||||
// equal compares the equality of two ReplicaSet slices regardless of their ordering
|
||||
func equal(rss1, rss2 []*extensions.ReplicaSet) bool {
|
||||
if reflect.DeepEqual(rss1, rss2) {
|
||||
return true
|
||||
}
|
||||
if rcs1 == nil || rcs2 == nil || len(rcs1) != len(rcs2) {
|
||||
if rss1 == nil || rss2 == nil || len(rss1) != len(rss2) {
|
||||
return false
|
||||
}
|
||||
count := 0
|
||||
for _, rc1 := range rcs1 {
|
||||
for _, rc2 := range rcs2 {
|
||||
if reflect.DeepEqual(rc1, rc2) {
|
||||
for _, rs1 := range rss1 {
|
||||
for _, rs2 := range rss2 {
|
||||
if reflect.DeepEqual(rs1, rs2) {
|
||||
count++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return count == len(rcs1)
|
||||
return count == len(rss1)
|
||||
}
@ -18,6 +18,8 @@ package labels

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

// Clones the given map and returns a new map with the given key and value added.
@ -51,3 +53,43 @@ func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]s
	delete(newLabels, labelKey)
	return newLabels
}

// Clones the given selector and returns a new selector with the given key and value added.
// Returns the given selector, if labelKey is empty.
func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector {
	if labelKey == "" {
		// Dont need to add a label.
		return selector
	}

	// Clone.
	newSelector := new(unversioned.LabelSelector)

	// TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here.
	newSelector.MatchLabels = make(map[string]string)
	if selector.MatchLabels != nil {
		for key, val := range selector.MatchLabels {
			newSelector.MatchLabels[key] = val
		}
	}
	newSelector.MatchLabels[labelKey] = fmt.Sprintf("%d", labelValue)

	if selector.MatchExpressions != nil {
		newMExps := make([]unversioned.LabelSelectorRequirement, len(selector.MatchExpressions))
		for i, me := range selector.MatchExpressions {
			newMExps[i].Key = me.Key
			newMExps[i].Operator = me.Operator
			if me.Values != nil {
				newMExps[i].Values = make([]string, len(me.Values))
				copy(newMExps[i].Values, me.Values)
			} else {
				newMExps[i].Values = nil
			}
		}
		newSelector.MatchExpressions = newMExps
	} else {
		newSelector.MatchExpressions = nil
	}

	return newSelector
}
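CloneSelectorAndAddLabel mirrors, for LabelSelector values, what CloneAndAddLabel does for plain label maps: copy everything, then set one extra key. A minimal standalone sketch of that copy-then-add semantics on plain maps; the key and value are illustrative, while the callers in this change pass the deployment's unique-label key and a numeric pod-template hash.

package main

import "fmt"

// cloneAndAddLabel sketches the copy-then-add semantics on a plain label map;
// CloneSelectorAndAddLabel above does the same thing one level deeper, inside
// a LabelSelector's MatchLabels, and copies MatchExpressions unchanged.
func cloneAndAddLabel(labels map[string]string, key, value string) map[string]string {
	if key == "" {
		return labels
	}
	newLabels := make(map[string]string, len(labels)+1)
	for k, v := range labels {
		newLabels[k] = v
	}
	newLabels[key] = value
	return newLabels
}

func main() {
	template := map[string]string{"name": "nginx"}
	// Both the label key and the hash value here are illustrative.
	fmt.Println(cloneAndAddLabel(template, "pod-template-hash", "1234567890"))
}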
@ -21,10 +21,10 @@ import (
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
"k8s.io/kubernetes/pkg/util/intstr"

@ -47,7 +47,7 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
It("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old rcs", func() {
It("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
@ -59,22 +59,22 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
It("deployment should support rollback", func() {
testRollbackDeployment(f)
})
It("deployment should support rollback when there's RC with no revision", func() {
testRollbackDeploymentRCNoRevision(f)
It("deployment should support rollback when there's replica set with no revision", func() {
testRollbackDeploymentRSNoRevision(f)
})
})

func newRC(rcName string, replicas int, rcPodLabels map[string]string, imageName string, image string) *api.ReplicationController {
return &api.ReplicationController{
func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: rcName,
Name: rsName,
},
Spec: api.ReplicationControllerSpec{
Spec: extensions.ReplicaSetSpec{
Replicas: replicas,
Selector: rcPodLabels,
Selector: &unversioned.LabelSelector{MatchLabels: rsPodLabels},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: rcPodLabels,
Labels: rsPodLabels,
},
Spec: api.PodSpec{
Containers: []api.Container{
@ -96,7 +96,7 @@ func newDeployment(deploymentName string, replicas int, podLabels map[string]str
},
Spec: extensions.DeploymentSpec{
Replicas: replicas,
Selector: podLabels,
Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
@ -126,27 +126,27 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
}
}

// checkDeploymentRevision checks if the input deployment's and its new RC's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Check revision of the new RC of this deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// Check revision of the new replica set of this deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC.Annotations).NotTo(Equal(nil))
Expect(newRC.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
Expect(newRS.Annotations).NotTo(Equal(nil))
Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
// Check revision of this deployment
Expect(deployment.Annotations).NotTo(Equal(nil))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
if len(imageName) > 0 {
// Check the image the new RC creates
Expect(newRC.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(newRC.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
// Check the image the new replica set creates
Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
// Check the image the deployment creates
Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
}
return deployment, newRC
return deployment, newRS
}

func testNewDeployment(f *Framework) {
@ -161,7 +161,7 @@ func testNewDeployment(f *Framework) {
replicas := 1
Logf("Creating simple deployment %s", deploymentName)
d := newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
d.Annotations = map[string]string{"test": "should-copy-to-RC", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-RC"}
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
@ -169,10 +169,10 @@ func testNewDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@ -191,12 +191,12 @@ func testNewDeployment(f *Framework) {
Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))

// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
// Check other annotations
Expect(newRC.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(newRC.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-RC"))
Expect(newRS.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
Expect(newRS.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set"))
}

func testRollingUpdateDeployment(f *Framework) {
@ -207,18 +207,18 @@ func testRollingUpdateDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod",
"pod": "nginx",
}

rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 3
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
@ -237,10 +237,10 @@ func testRollingUpdateDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()

err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
@ -258,24 +258,24 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod-2",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 1

rcRevision := "3546343826724305832"
rsRevision := "3546343826724305832"
annotations := make(map[string]string)
annotations[deploymentutil.RevisionAnnotation] = rcRevision
rc := newRC(rcName, replicas, rcPodLabels, "nginx", "nginx")
rc.Annotations = annotations
annotations[deploymentutil.RevisionAnnotation] = rsRevision
rs := newRS(rsName, replicas, rsPodLabels, "nginx", "nginx")
rs.Annotations = annotations

_, err := c.Core().ReplicationControllers(ns).Create(rc)
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
@ -294,10 +294,10 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()

err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
@ -311,13 +311,14 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// There should be 2 events, one to scale up the new RC and then to scale down the old RC.
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down
// the old ReplicaSet.
Expect(len(events.Items)).Should(Equal(2))
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 1", newRC.Name)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
Expect(newRS).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 1", newRS.Name)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))

// Check if it's updated to revision 3546343826724305833 correctly
checkDeploymentRevision(c, ns, deploymentName, "3546343826724305833", "redis", "redis")
@ -331,18 +332,18 @@ func testRecreateDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod-3",
"pod": "nginx",
}

rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 3
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
@ -361,10 +362,10 @@ func testRecreateDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()

err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
@ -383,13 +384,13 @@ func testRecreateDeployment(f *Framework) {
Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// There should be 2 events, one to scale up the new RC and then to scale down the old RC.
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet.
Expect(len(events.Items)).Should(Equal(2))
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 3", newRC.Name)))
Expect(newRS).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))

// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
@ -402,15 +403,15 @@ func testDeploymentCleanUpPolicy(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "cleanup-pod",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 1
revisionHistoryLimit := new(int)
*revisionHistoryLimit = 0
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())

// Verify that the required pods have come up.
@ -430,13 +431,13 @@ func testDeploymentCleanUpPolicy(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()

err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
Expect(err).NotTo(HaveOccurred())
}

@ -450,21 +451,21 @@ func testRolloverDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
podName := "rollover-pod"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}

rcName := "nginx-controller"
rcReplicas := 4
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
rsName := "nginx-controller"
rsReplicas := 4
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, rcReplicas)
err = verifyPods(unversionedClient, ns, podName, false, rsReplicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
@ -490,22 +491,22 @@ func testRolloverDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Make sure the deployment starts to scale up and down RCs
// Make sure the deployment starts to scale up and down replica sets
waitForPartialEvents(unversionedClient, ns, deployment, 2)
// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

// Before the deployment finishes, update the deployment to rollover the above 2 rcs and bring up redis pods.
// Before the deployment finishes, update the deployment to rollover the above 2 ReplicaSets and bring up redis pods.
// If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
Expect(newRC.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImage := "redis"
newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
@ -544,10 +545,10 @@ func testPausedDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())

// Verify that there is no latest state realized for the new deployment.
rc, err := deploymentutil.GetNewRC(*deployment, c)
rs, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if rc != nil {
err = fmt.Errorf("unexpected new rc/%s for deployment/%s", rc.Name, deployment.Name)
if rs != nil {
err = fmt.Errorf("unexpected new rs/%s for deployment/%s", rs.Name, deployment.Name)
Expect(err).NotTo(HaveOccurred())
}

@ -556,28 +557,32 @@ func testPausedDeployment(f *Framework) {
deployment, err = c.Extensions().Deployments(ns).Update(deployment)
Expect(err).NotTo(HaveOccurred())

opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
w, err := c.Core().ReplicationControllers(ns).Watch(opts)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
opts := api.ListOptions{LabelSelector: selector}
w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
Expect(err).NotTo(HaveOccurred())

select {
case <-w.ResultChan():
// this is it
case <-time.After(time.Minute):
err = fmt.Errorf("expected a new rc to be created")
err = fmt.Errorf("expected a new replica set to be created")
Expect(err).NotTo(HaveOccurred())
}

// Pause the deployment and delete the replication controller.
// Pause the deployment and delete the replica set.
// The paused deployment shouldn't recreate a new one.
deployment.Spec.Paused = true
deployment.ResourceVersion = ""
deployment, err = c.Extensions().Deployments(ns).Update(deployment)
Expect(err).NotTo(HaveOccurred())

newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())

deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@ -586,10 +591,10 @@ func testPausedDeployment(f *Framework) {
err = fmt.Errorf("deployment %q should be paused", deployment.Name)
Expect(err).NotTo(HaveOccurred())
}
shouldBeNil, err := deploymentutil.GetNewRC(*deployment, c)
shouldBeNil, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if shouldBeNil != nil {
err = fmt.Errorf("deployment %q shouldn't have a rc but there is %q", deployment.Name, shouldBeNil.Name)
err = fmt.Errorf("deployment %q shouldn't have a replica set but there is %q", deployment.Name, shouldBeNil.Name)
Expect(err).NotTo(HaveOccurred())
}
}
@ -618,14 +623,14 @@ func testRollbackDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Core().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
for _, oldRS := range oldRSs {
Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
@ -689,36 +694,36 @@ func testRollbackDeployment(f *Framework) {
checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}

// testRollbackDeploymentRCNoRevision tests that deployment supports rollback even when there's old RC without revision.
// An old RC without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
// annotation to the old RC. Then rollback the deployment to last revision, and it should fail and emit related event.
// testRollbackDeploymentRSNoRevision tests that deployment supports rollback even when there's old replica set without revision.
// An old replica set without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
// annotation to the old replica set. Then rollback the deployment to last revision, and it should fail and emit related event.
// Then update the deployment to v2 and rollback it to v1 should succeed and emit related event, now the deployment
// becomes v3. Then rollback the deployment to v10 (doesn't exist in history) should fail and emit related event.
// Finally, rollback the deployment (v3) to v3 should be no-op and emit related event.
func testRollbackDeploymentRCNoRevision(f *Framework) {
func testRollbackDeploymentRSNoRevision(f *Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := clientset.FromUnversionedClient(f.Client)
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}

rcName := "nginx-controller"
rcReplicas := 0
rc := newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx")
rc.Annotations = make(map[string]string)
rc.Annotations["make"] = "difference"
_, err := c.Core().ReplicationControllers(ns).Create(rc)
rsName := "nginx-controller"
rsReplicas := 0
rs := newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx")
rs.Annotations = make(map[string]string)
rs.Annotations["make"] = "difference"
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()

// Create a deployment to create nginx pods, which have different template than the rc created above.
// Create a deployment to create nginx pods, which have different template than the replica set created above.
deploymentName, deploymentImageName := "nginx-deployment", "nginx"
deploymentReplicas := 1
deploymentImage := "nginx"
@ -732,14 +737,14 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Core().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
for _, oldRS := range oldRSs {
Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
@ -761,9 +766,9 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

// Check that the rc we created still doesn't contain revision information
rc, err = c.Core().ReplicationControllers(ns).Get(rcName)
Expect(rc.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
// Check that the replica set we created still doesn't contain revision information
rs, err = c.Extensions().ReplicaSets(ns).Get(rsName)
Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))

// Update the deploymentRollback to rollback to last revision
// Since there's only 1 revision in history, it should stay as revision 1
@ -41,15 +41,16 @@ var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slo
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"

Describe("Deployment [Feature:Deployment]", func() {
// CPU tests via deployments
It(titleUp, func() {
scaleUp("deployment", kindDeployment, rc, f)
})
It(titleDown, func() {
scaleDown("deployment", kindDeployment, rc, f)
})
})
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
// Describe("Deployment [Feature:Deployment]", func() {
// // CPU tests via deployments
// It(titleUp, func() {
// scaleUp("deployment", kindDeployment, rc, f)
// })
// It(titleDown, func() {
// scaleDown("deployment", kindDeployment, rc, f)
// })
// })

Describe("ReplicationController", func() {
// CPU tests via replication controllers
@ -457,7 +457,7 @@ var _ = Describe("Kubectl client", func() {
withStdinData("abcd1234\n").
execOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
runTestPod, err := util.GetFirstPod(c, ns, map[string]string{"run": "run-test-3"})
runTestPod, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}))
if err != nil {
os.Exit(1)
}
@ -1529,8 +1529,10 @@ func (config *DeploymentConfig) create() error {
},
Spec: extensions.DeploymentSpec{
Replicas: config.Replicas,
Selector: map[string]string{
"name": config.Name,
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
@ -2064,43 +2066,43 @@ func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, d
if err != nil {
return false, err
}
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
if err != nil {
return false, err
}
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
if err != nil {
return false, err
}
if newRC == nil {
if newRS == nil {
// New RC hasn't been created yet.
return false, nil
}
allRCs := append(oldRCs, newRC)
totalCreated := deploymentutil.GetReplicaCountForRCs(allRCs)
totalAvailable, err := deploymentutil.GetAvailablePodsForRCs(c, allRCs, minReadySeconds)
allRSs := append(oldRSs, newRS)
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
totalAvailable, err := deploymentutil.GetAvailablePodsForReplicaSets(c, allRSs, minReadySeconds)
if err != nil {
return false, err
}
if totalCreated > maxCreated {
logRCsOfDeployment(deploymentName, oldRCs, newRC)
logReplicaSetsOfDeployment(deploymentName, oldRSs, newRS)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
if totalAvailable < minAvailable {
logRCsOfDeployment(deploymentName, oldRCs, newRC)
logReplicaSetsOfDeployment(deploymentName, oldRSs, newRS)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
}

if deployment.Status.Replicas == desiredUpdatedReplicas &&
deployment.Status.UpdatedReplicas == desiredUpdatedReplicas {
// Verify RCs.
if deploymentutil.GetReplicaCountForRCs(oldRCs) != 0 {
logRCsOfDeployment(deploymentName, oldRCs, newRC)
return false, fmt.Errorf("old RCs are not fully scaled down")
// Verify replica sets.
if deploymentutil.GetReplicaCountForReplicaSets(oldRSs) != 0 {
logReplicaSetsOfDeployment(deploymentName, oldRSs, newRS)
return false, fmt.Errorf("old replica sets are not fully scaled down")
}
if deploymentutil.GetReplicaCountForRCs([]*api.ReplicationController{newRC}) != desiredUpdatedReplicas {
logRCsOfDeployment(deploymentName, oldRCs, newRC)
return false, fmt.Errorf("new RC is not fully scaled up")
if deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) != desiredUpdatedReplicas {
logReplicaSetsOfDeployment(deploymentName, oldRSs, newRS)
return false, fmt.Errorf("new replica set is not fully scaled up")
}
return true, nil
}
@ -2109,26 +2111,25 @@ func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, d
}

// Waits for the deployment to clean up old rcs.
func waitForDeploymentOldRCsNum(c *clientset.Clientset, ns, deploymentName string, desiredRCNum int) error {
func waitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {

deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
if err != nil {
return false, err
}
return len(oldRCs) == desiredRCNum, nil
return len(oldRSs) == desiredRSNum, nil
})
}

func logRCsOfDeployment(deploymentName string, oldRCs []*api.ReplicationController, newRC *api.ReplicationController) {
for i := range oldRCs {
Logf("Old RCs (%d/%d) of deployment %s: %+v", i+1, len(oldRCs), deploymentName, oldRCs[i])
func logReplicaSetsOfDeployment(deploymentName string, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
for i := range oldRSs {
Logf("Old ReplicaSets (%d/%d) of deployment %s: %+v", i+1, len(oldRSs), deploymentName, oldRSs[i])
}
Logf("New RC of deployment %s: %+v", deploymentName, newRC)
Logf("New ReplicaSet of deployment %s: %+v", deploymentName, newRS)
}

// Waits for the number of events on the given object to reach a desired count.