Łukasz Oleś 2017-02-16 11:18:16 +01:00
parent 8124705c81
commit b27308c317
20 changed files with 1091 additions and 89 deletions

View File

@ -518,6 +518,25 @@ func extensionFuncs(t apitesting.TestingCommon) []interface{} {
} }
} }
}, },
func(j *extensions.DaemonSetUpdateStrategy, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of the valid values.
strategyTypes := []extensions.DaemonSetUpdateStrategyType{extensions.RollingUpdateDaemonSetStrategyType, extensions.OnDeleteDaemonSetStrategyType}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != extensions.RollingUpdateDaemonSetStrategyType {
j.RollingUpdate = nil
} else {
rollingUpdate := extensions.RollingUpdateDaemonSet{}
if c.RandBool() {
if c.RandBool() {
rollingUpdate.MaxUnavailable = intstr.FromInt(1 + int(c.Rand.Int31()))
} else {
rollingUpdate.MaxUnavailable = intstr.FromString(fmt.Sprintf("%d%%", 1+c.Rand.Int31()))
}
}
j.RollingUpdate = &rollingUpdate
}
},
} }
} }
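
As a side note, the invariant the fuzzer above maintains (RollingUpdate is set exactly when the strategy type is RollingUpdate) can be stated as a small standalone check. This is an illustrative sketch using the internal extensions types from this diff, not code from the commit:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/apis/extensions"
)

// consistentStrategy reports whether a fuzzed DaemonSetUpdateStrategy is
// internally consistent: RollingUpdate must be non-nil exactly when the
// type is RollingUpdate.
func consistentStrategy(s extensions.DaemonSetUpdateStrategy) bool {
    if s.Type == extensions.RollingUpdateDaemonSetStrategyType {
        return s.RollingUpdate != nil
    }
    return s.RollingUpdate == nil
}

func main() {
    s := extensions.DaemonSetUpdateStrategy{Type: extensions.OnDeleteDaemonSetStrategyType}
    fmt.Println(consistentStrategy(s)) // true
}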

View File

@ -370,11 +370,10 @@ type DeploymentList struct {
Items []Deployment Items []Deployment
} }
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
type DaemonSetUpdateStrategy struct { type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// +optional // Default is OnDelete.
// +optional
Type DaemonSetUpdateStrategyType Type DaemonSetUpdateStrategyType
// Rolling update config params. Present only if DaemonSetUpdateStrategy = // Rolling update config params. Present only if DaemonSetUpdateStrategy =
@ -382,7 +381,8 @@ type DaemonSetUpdateStrategy struct {
//--- //---
// TODO: Update this to follow our convention for oneOf, whatever we decide it // TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as DeploymentStrategy.RollingUpdate. // to be. Same as DeploymentStrategy.RollingUpdate.
// +optional // See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
RollingUpdate *RollingUpdateDaemonSet RollingUpdate *RollingUpdateDaemonSet
} }
@ -391,6 +391,9 @@ type DaemonSetUpdateStrategyType string
const ( const (
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
// Replace the old daemons only when they are killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
) )
// Spec to control the desired behavior of daemon set rolling update. // Spec to control the desired behavior of daemon set rolling update.
@ -408,17 +411,9 @@ type RollingUpdateDaemonSet struct {
// it then proceeds onto other DaemonSet pods, thus ensuring that at least // it then proceeds onto other DaemonSet pods, thus ensuring that at least
// 70% of original number of DaemonSet pods are available at all times // 70% of original number of DaemonSet pods are available at all times
// during the update. // during the update.
// +optional // +optional
MaxUnavailable intstr.IntOrString MaxUnavailable intstr.IntOrString
// Minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int
} }
*/
// DaemonSetSpec is the specification of a daemon set. // DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct { type DaemonSetSpec struct {
@ -436,31 +431,18 @@ type DaemonSetSpec struct {
// More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template
Template api.PodTemplateSpec Template api.PodTemplateSpec
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. // UpdateStrategy to replace existing DaemonSet pods with new pods.
/* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
// Update strategy to replace existing DaemonSet pods with new pods.
// +optional // +optional
UpdateStrategy DaemonSetUpdateStrategy UpdateStrategy DaemonSetUpdateStrategy
// Label key that is added to DaemonSet pods to distinguish between old and // MinReadySeconds minimum number of seconds for which a newly created DaemonSet pod should
// new pod templates during DaemonSet update. // be ready without any of its container crashing, for it to be considered
// Users can set this to an empty string to indicate that the system should // available. Defaults to 0 (pod will be considered available as soon as it
// not add any label. If unspecified, system uses // is ready).
// DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
// Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
// No label is added if this is set to empty string.
// +optional // +optional
UniqueLabelKey string MinReadySeconds int32
*/
} }
const (
// DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates during
// DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
)
// DaemonSetStatus represents the current status of a daemon set. // DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct { type DaemonSetStatus struct {
// CurrentNumberScheduled is the number of nodes that are running at least 1 // CurrentNumberScheduled is the number of nodes that are running at least 1
@ -482,6 +464,23 @@ type DaemonSetStatus struct {
// ObservedGeneration is the most recent generation observed by the daemon set controller. // ObservedGeneration is the most recent generation observed by the daemon set controller.
// +optional // +optional
ObservedGeneration int64 ObservedGeneration int64
// UpdatedNumberScheduled is the total number of nodes that are running the updated
// daemon pod.
// +optional
UpdatedNumberScheduled int32
// NumberAvailable is the number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least minReadySeconds)
// +optional
NumberAvailable int32
// NumberUnavailable is the number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least minReadySeconds)
// +optional
NumberUnavailable int32
} }
// +genclient=true // +genclient=true
@ -506,8 +505,20 @@ type DaemonSet struct {
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional // +optional
Status DaemonSetStatus Status DaemonSetStatus
// A sequence number representing a specific generation of the template.
// Populated by the system. Read-only.
// +optional
TemplateGeneration int64
} }
const (
// DaemonSetTemplateGenerationKey is the key of the label that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
)
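
For orientation, the new internal fields above can be populated as follows. This is an illustrative sketch, assuming the internal extensions package path and the intstr utility used elsewhere in this diff:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
    // RollingUpdate strategy: at most one pod may be unavailable at a time,
    // and a new pod must stay ready for 10s before it counts as available.
    spec := extensions.DaemonSetSpec{
        UpdateStrategy: extensions.DaemonSetUpdateStrategy{
            Type: extensions.RollingUpdateDaemonSetStrategyType,
            RollingUpdate: &extensions.RollingUpdateDaemonSet{
                MaxUnavailable: intstr.FromInt(1),
            },
        },
        MinReadySeconds: 10,
    }
    fmt.Printf("%+v\n", spec)
}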
// DaemonSetList is a collection of daemon sets. // DaemonSetList is a collection of daemon sets.
type DaemonSetList struct { type DaemonSetList struct {
metav1.TypeMeta metav1.TypeMeta

View File

@ -38,6 +38,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet,
Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec,
Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
) )
@ -219,6 +221,23 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme
return nil return nil
} }
func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error {
if out.MaxUnavailable == nil {
out.MaxUnavailable = &intstr.IntOrString{}
}
if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
return err
}
return nil
}
func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
return err
}
return nil
}
func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error {
out.Replicas = new(int32) out.Replicas = new(int32)
*out.Replicas = int32(in.Replicas) *out.Replicas = int32(in.Replicas)

View File

@ -47,6 +47,21 @@ func SetDefaults_DaemonSet(obj *DaemonSet) {
obj.Labels = labels obj.Labels = labels
} }
} }
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = OnDeleteDaemonSetStrategyType
}
if updateStrategy.Type == RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
maxUnavailable := intstr.FromInt(1)
updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
}
} }
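
A minimal sketch of how this defaulting behaves, assuming the v1beta1 package path for the defaults file shown in this diff:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
    // An empty update strategy defaults to OnDelete.
    ds := &v1beta1.DaemonSet{}
    v1beta1.SetDefaults_DaemonSet(ds)
    fmt.Println(ds.Spec.UpdateStrategy.Type) // OnDelete

    // A RollingUpdate strategy with no parameters gets MaxUnavailable=1.
    ds2 := &v1beta1.DaemonSet{}
    ds2.Spec.UpdateStrategy.Type = v1beta1.RollingUpdateDaemonSetStrategyType
    v1beta1.SetDefaults_DaemonSet(ds2)
    fmt.Println(ds2.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable.String()) // "1"
}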
func SetDefaults_Deployment(obj *Deployment) { func SetDefaults_Deployment(obj *Deployment) {

View File

@ -74,6 +74,9 @@ func TestSetDefaultDaemonSet(t *testing.T) {
MatchLabels: defaultLabels, MatchLabels: defaultLabels,
}, },
Template: defaultTemplate, Template: defaultTemplate,
UpdateStrategy: DaemonSetUpdateStrategy{
Type: OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -99,6 +102,9 @@ func TestSetDefaultDaemonSet(t *testing.T) {
MatchLabels: defaultLabels, MatchLabels: defaultLabels,
}, },
Template: defaultTemplate, Template: defaultTemplate,
UpdateStrategy: DaemonSetUpdateStrategy{
Type: OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -107,6 +113,9 @@ func TestSetDefaultDaemonSet(t *testing.T) {
expected: &DaemonSet{ expected: &DaemonSet{
Spec: DaemonSetSpec{ Spec: DaemonSetSpec{
Template: templateNoLabel, Template: templateNoLabel,
UpdateStrategy: DaemonSetUpdateStrategy{
Type: OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -117,6 +126,9 @@ func TestSetDefaultDaemonSet(t *testing.T) {
expected: &DaemonSet{ expected: &DaemonSet{
Spec: DaemonSetSpec{ Spec: DaemonSetSpec{
Template: templateNoLabel, Template: templateNoLabel,
UpdateStrategy: DaemonSetUpdateStrategy{
Type: OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -127,6 +139,9 @@ func TestSetDefaultDaemonSet(t *testing.T) {
expected: &DaemonSet{ expected: &DaemonSet{
Spec: DaemonSetSpec{ Spec: DaemonSetSpec{
Template: templateNoLabel, Template: templateNoLabel,
UpdateStrategy: DaemonSetUpdateStrategy{
Type: OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },

View File

@ -369,20 +369,20 @@ type DeploymentList struct {
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
} }
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
type DaemonSetUpdateStrategy struct { type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// +optional // Default is OnDelete.
Type DaemonSetUpdateStrategyType `json:"type,omitempty"` // +optional
Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// Rolling update config params. Present only if DaemonSetUpdateStrategy = // Rolling update config params. Present only if DaemonSetUpdateStrategy =
// RollingUpdate. // RollingUpdate.
//--- //---
// TODO: Update this to follow our convention for oneOf, whatever we decide it // TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as DeploymentStrategy.RollingUpdate. // to be. Same as DeploymentStrategy.RollingUpdate.
// +optional // See https://github.com/kubernetes/kubernetes/issues/35345
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` // +optional
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
} }
type DaemonSetUpdateStrategyType string type DaemonSetUpdateStrategyType string
@ -390,6 +390,9 @@ type DaemonSetUpdateStrategyType string
const ( const (
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
// Replace the old daemons only when they are killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
) )
// Spec to control the desired behavior of daemon set rolling update. // Spec to control the desired behavior of daemon set rolling update.
@ -407,17 +410,9 @@ type RollingUpdateDaemonSet struct {
// it then proceeds onto other DaemonSet pods, thus ensuring that at least // it then proceeds onto other DaemonSet pods, thus ensuring that at least
// 70% of original number of DaemonSet pods are available at all times // 70% of original number of DaemonSet pods are available at all times
// during the update. // during the update.
// +optional // +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
// Minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
} }
*/
// DaemonSetSpec is the specification of a daemon set. // DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct { type DaemonSetSpec struct {
@ -435,31 +430,18 @@ type DaemonSetSpec struct {
// More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. // UpdateStrategy to replace existing DaemonSet pods with new pods.
/* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
// Update strategy to replace existing DaemonSet pods with new pods.
// +optional // +optional
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
// Label key that is added to DaemonSet pods to distinguish between old and // MinReadySeconds minimum number of seconds for which a newly created DaemonSet pod should
// new pod templates during DaemonSet update. // be ready without any of its container crashing, for it to be considered
// Users can set this to an empty string to indicate that the system should // available. Defaults to 0 (pod will be considered available as soon as it
// not add any label. If unspecified, system uses // is ready).
// DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
// Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
// No label is added if this is set to empty string.
// +optional // +optional
UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"` MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
*/
} }
const (
// DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates during
// DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
)
// DaemonSetStatus represents the current status of a daemon set. // DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct { type DaemonSetStatus struct {
// CurrentNumberScheduled is the number of nodes that are running at least 1 // CurrentNumberScheduled is the number of nodes that are running at least 1
@ -484,6 +466,23 @@ type DaemonSetStatus struct {
// ObservedGeneration is the most recent generation observed by the daemon set controller. // ObservedGeneration is the most recent generation observed by the daemon set controller.
// +optional // +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
// UpdatedNumberScheduled is the total number of nodes that are running the updated
// daemon pod.
// +optional
UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
// NumberAvailable is the number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least minReadySeconds)
// +optional
NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
// NumberUnavailable is the number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least minReadySeconds)
// +optional
NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
} }
// +genclient=true // +genclient=true
@ -508,8 +507,20 @@ type DaemonSet struct {
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional // +optional
Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
// A sequence number representing a specific generation of the template.
// Populated by the system. Read-only.
// +optional
TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,4,opt,name=templateGeneration"`
} }
const (
// DaemonSetTemplateGenerationKey is the key of the label that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
)
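
The controller later uses this label to tell old pods from updated ones via util.IsPodUpdated (referenced further down in this diff; its body is not shown here). A plausible check, sketched purely for illustration under the assumption that pods carry this label populated from TemplateGeneration, is:

package main

import "fmt"

// isPodUpdated is an illustrative stand-in for util.IsPodUpdated, whose
// implementation is not part of this excerpt: a pod counts as updated when its
// "pod-template-generation" label matches the DaemonSet's TemplateGeneration.
func isPodUpdated(dsTemplateGeneration int64, podLabels map[string]string) bool {
    return podLabels["pod-template-generation"] == fmt.Sprint(dsTemplateGeneration)
}

func main() {
    fmt.Println(isPodUpdated(2, map[string]string{"pod-template-generation": "2"})) // true
    fmt.Println(isPodUpdated(3, map[string]string{"pod-template-generation": "2"})) // false
}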
// DaemonSetList is a collection of daemon sets. // DaemonSetList is a collection of daemon sets.
type DaemonSetList struct { type DaemonSetList struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`

View File

@ -110,6 +110,9 @@ func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...)
return allErrs return allErrs
} }
@ -141,6 +144,39 @@ func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path)
if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
} }
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...)
return allErrs
}
func ValidateRollingUpdateDaemonSet(rollingUpdate *extensions.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 {
// MaxUnavailable cannot be 0.
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0"))
}
// Validate that MaxUnavailable is not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
return allErrs
}
func ValidateDaemonSetUpdateStrategy(strategy *extensions.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch strategy.Type {
case extensions.OnDeleteDaemonSetStrategyType:
case extensions.RollingUpdateDaemonSetStrategyType:
// Make sure RollingUpdate field isn't nil.
if strategy.RollingUpdate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), ""))
return allErrs
}
allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
default:
validValues := []string{string(extensions.RollingUpdateDaemonSetStrategyType), string(extensions.OnDeleteDaemonSetStrategyType)}
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
}
return allErrs return allErrs
} }
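
These validators can be exercised directly. An illustrative sketch, with import paths assumed from the packages referenced in this diff, showing that a MaxUnavailable of 0 is rejected:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/pkg/apis/extensions/validation"
)

func main() {
    strategy := extensions.DaemonSetUpdateStrategy{
        Type: extensions.RollingUpdateDaemonSetStrategyType,
        RollingUpdate: &extensions.RollingUpdateDaemonSet{
            MaxUnavailable: intstr.FromInt(0), // invalid: maxUnavailable cannot be 0
        },
    }
    errs := validation.ValidateDaemonSetUpdateStrategy(&strategy, field.NewPath("spec", "updateStrategy"))
    fmt.Println(len(errs) > 0) // true
}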

View File

@ -46,6 +46,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
NumberMisscheduled: 2, NumberMisscheduled: 2,
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -55,6 +58,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
NumberMisscheduled: 1, NumberMisscheduled: 1,
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
}, },
@ -81,6 +87,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -95,6 +104,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: -3, DesiredNumberScheduled: -3,
NumberReady: -1, NumberReady: -1,
ObservedGeneration: -3, ObservedGeneration: -3,
UpdatedNumberScheduled: -1,
NumberAvailable: -1,
NumberUnavailable: -2,
}, },
}, },
}, },
@ -111,6 +123,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -125,6 +140,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
}, },
@ -141,6 +159,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -155,6 +176,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
}, },
@ -171,6 +195,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -185,6 +212,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: -3, DesiredNumberScheduled: -3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
}, },
@ -201,6 +231,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -215,6 +248,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: -1, NumberReady: -1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
}, },
@ -231,6 +267,9 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: 3, ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -245,6 +284,117 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
DesiredNumberScheduled: 3, DesiredNumberScheduled: 3,
NumberReady: 1, NumberReady: 1,
ObservedGeneration: -3, ObservedGeneration: -3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
},
},
},
"negative UpdatedNumberScheduled": {
old: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 2,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
},
},
update: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 1,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: -1,
NumberAvailable: 1,
NumberUnavailable: 2,
},
},
},
"negative NumberAvailable": {
old: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 2,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
},
},
update: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 1,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: -1,
NumberUnavailable: 2,
},
},
},
"negative NumberUnavailable": {
old: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 2,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: 2,
},
},
update: extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
},
Status: extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 1,
DesiredNumberScheduled: 3,
NumberReady: 1,
ObservedGeneration: 3,
UpdatedNumberScheduled: 1,
NumberAvailable: 1,
NumberUnavailable: -2,
}, },
}, },
}, },
@ -349,6 +499,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplateAbc.Template, Template: validPodTemplateAbc.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -356,6 +509,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplateAbc.Template, Template: validPodTemplateAbc.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -365,6 +521,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplateAbc.Template, Template: validPodTemplateAbc.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -372,6 +531,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector2}, Selector: &metav1.LabelSelector{MatchLabels: validSelector2},
Template: validPodTemplateAbc2.Template, Template: validPodTemplateAbc2.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -381,6 +543,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplateAbc.Template, Template: validPodTemplateAbc.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
update: extensions.DaemonSet{ update: extensions.DaemonSet{
@ -388,6 +553,9 @@ func TestValidateDaemonSetUpdate(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplateNodeSelector.Template, Template: validPodTemplateNodeSelector.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
}, },
@ -536,6 +704,9 @@ func TestValidateDaemonSet(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplate.Template, Template: validPodTemplate.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
{ {
@ -543,6 +714,9 @@ func TestValidateDaemonSet(t *testing.T) {
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: validSelector}, Selector: &metav1.LabelSelector{MatchLabels: validSelector},
Template: validPodTemplate.Template, Template: validPodTemplate.Template,
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
}, },
}, },
} }

View File

@ -13,6 +13,7 @@ go_library(
srcs = [ srcs = [
"daemoncontroller.go", "daemoncontroller.go",
"doc.go", "doc.go",
"update.go",
], ],
tags = ["automanaged"], tags = ["automanaged"],
deps = [ deps = [
@ -26,6 +27,8 @@ go_library(
"//pkg/client/listers/core/v1:go_default_library", "//pkg/client/listers/core/v1:go_default_library",
"//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/client/listers/extensions/v1beta1:go_default_library",
"//pkg/controller:go_default_library", "//pkg/controller:go_default_library",
"//pkg/controller/daemon/util:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/metrics:go_default_library", "//pkg/util/metrics:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
@ -35,6 +38,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/util/errors", "//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/intstr",
"//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/kubernetes/typed/core/v1", "//vendor:k8s.io/client-go/kubernetes/typed/core/v1",
@ -47,7 +51,10 @@ go_library(
go_test( go_test(
name = "go_default_test", name = "go_default_test",
srcs = ["daemoncontroller_test.go"], srcs = [
"daemoncontroller_test.go",
"update_test.go",
],
library = ":go_default_library", library = ":go_default_library",
tags = ["automanaged"], tags = ["automanaged"],
deps = [ deps = [
@ -78,6 +85,9 @@ filegroup(
filegroup( filegroup(
name = "all-srcs", name = "all-srcs",
srcs = [":package-srcs"], srcs = [
":package-srcs",
"//pkg/controller/daemon/util:all-srcs",
],
tags = ["automanaged"], tags = ["automanaged"],
) )

View File

@ -44,6 +44,7 @@ import (
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
@ -255,6 +256,17 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
dsc.queue.Add(key) dsc.queue.Add(key)
} }
func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after time.Duration) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
// TODO: Handle overlapping controllers better. See comment in ReplicationManager.
dsc.queue.AddAfter(key, after)
}
func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet { func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
// look up in the cache, if cached and the cache is valid, just return cached value // look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached { if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
@ -342,8 +354,14 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
return return
} }
glog.V(4).Infof("Pod %s updated.", curPod.Name) glog.V(4).Infof("Pod %s updated.", curPod.Name)
changedToReady := !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod)
if curDS := dsc.getPodDaemonSet(curPod); curDS != nil { if curDS := dsc.getPodDaemonSet(curPod); curDS != nil {
dsc.enqueueDaemonSet(curDS) dsc.enqueueDaemonSet(curDS)
// See https://github.com/kubernetes/kubernetes/pull/38076 for more details
if changedToReady && curDS.Spec.MinReadySeconds > 0 {
dsc.enqueueDaemonSetAfter(curDS, time.Duration(curDS.Spec.MinReadySeconds)*time.Second)
}
} }
// If the labels have not changed, then the daemon set responsible for // If the labels have not changed, then the daemon set responsible for
// the pod is the same as it was before. In that case we have enqueued the daemon // the pod is the same as it was before. In that case we have enqueued the daemon
@ -521,11 +539,23 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
} }
} }
} }
errors := dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods)
// Surface an error when daemon pods fail, so the rate limiter prevents a kill-recreate hot loop
if failedPodsObserved > 0 {
errors = append(errors, fmt.Errorf("deleted %d failed pods of DaemonSet %s/%s", failedPodsObserved, ds.Namespace, ds.Name))
}
return utilerrors.NewAggregate(errors)
}
// syncNodes deletes the given pods and creates new daemon set pods on the given nodes,
// and returns a slice of errors, if any
func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string) []error {
// We need to set expectations before creating/deleting pods to avoid race conditions. // We need to set expectations before creating/deleting pods to avoid race conditions.
dsKey, err := controller.KeyFunc(ds) dsKey, err := controller.KeyFunc(ds)
if err != nil { if err != nil {
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err) return []error{fmt.Errorf("couldn't get key for object %#v: %v", ds, err)}
} }
createDiff := len(nodesNeedingDaemonPods) createDiff := len(nodesNeedingDaemonPods)
@ -546,10 +576,11 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff) glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
createWait := sync.WaitGroup{} createWait := sync.WaitGroup{}
createWait.Add(createDiff) createWait.Add(createDiff)
template := util.GetPodTemplateWithGeneration(ds.Spec.Template, ds.TemplateGeneration)
for i := 0; i < createDiff; i++ { for i := 0; i < createDiff; i++ {
go func(ix int) { go func(ix int) {
defer createWait.Done() defer createWait.Done()
if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &ds.Spec.Template, ds); err != nil { if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds); err != nil {
glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey) dsc.expectations.CreationObserved(dsKey)
errCh <- err errCh <- err
@ -581,18 +612,17 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
for err := range errCh { for err := range errCh {
errors = append(errors, err) errors = append(errors, err)
} }
// Throw an error when the daemon pods fail, to use ratelimiter to prevent kill-recreate hot loop return errors
if failedPodsObserved > 0 {
errors = append(errors, fmt.Errorf("deleted %d failed pods of DaemonSet %s/%s", failedPodsObserved, ds.Namespace, ds.Name))
}
return utilerrors.NewAggregate(errors)
} }
func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int) error { func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled && if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled && int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
int(ds.Status.NumberMisscheduled) == numberMisscheduled && int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
int(ds.Status.NumberReady) == numberReady && int(ds.Status.NumberReady) == numberReady &&
int(ds.Status.UpdatedNumberScheduled) == updatedNumberScheduled &&
int(ds.Status.NumberAvailable) == numberAvailable &&
int(ds.Status.NumberUnavailable) == numberUnavailable &&
ds.Status.ObservedGeneration >= ds.Generation { ds.Status.ObservedGeneration >= ds.Generation {
return nil return nil
} }
@ -611,6 +641,9 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled) toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled) toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled)
toUpdate.Status.NumberReady = int32(numberReady) toUpdate.Status.NumberReady = int32(numberReady)
toUpdate.Status.UpdatedNumberScheduled = int32(updatedNumberScheduled)
toUpdate.Status.NumberAvailable = int32(numberAvailable)
toUpdate.Status.NumberUnavailable = int32(numberUnavailable)
if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil { if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil {
return nil return nil
@ -638,7 +671,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err) return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
} }
var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
for i := range nodeList { for i := range nodeList {
node := nodeList[i] node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds) wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
@ -652,11 +685,18 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
desiredNumberScheduled++ desiredNumberScheduled++
if scheduled { if scheduled {
currentNumberScheduled++ currentNumberScheduled++
// Sort the daemon pods by creation time, so the the oldest is first. // Sort the daemon pods by creation time, so that the oldest is first.
daemonPods, _ := nodeToDaemonPods[node.Name] daemonPods, _ := nodeToDaemonPods[node.Name]
sort.Sort(podByCreationTimestamp(daemonPods)) sort.Sort(podByCreationTimestamp(daemonPods))
if v1.IsPodReady(daemonPods[0]) { pod := daemonPods[0]
if v1.IsPodReady(pod) {
numberReady++ numberReady++
if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
numberAvailable++
}
}
if util.IsPodUpdated(ds.TemplateGeneration, pod) {
updatedNumberScheduled++
} }
} }
} else { } else {
@ -665,8 +705,9 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
} }
} }
} }
numberUnavailable := desiredNumberScheduled - numberAvailable
err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady) err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
if err != nil { if err != nil {
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err) return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
} }
@ -714,6 +755,17 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
} }
} }
dsNeedsSync = dsc.expectations.SatisfiedExpectations(dsKey)
if dsNeedsSync && ds.DeletionTimestamp == nil {
switch ds.Spec.UpdateStrategy.Type {
case extensions.RollingUpdateDaemonSetStrategyType:
err = dsc.rollingUpdate(ds)
}
if err != nil {
return err
}
}
return dsc.updateDaemonSetStatus(ds) return dsc.updateDaemonSetStatus(ds)
} }

View File

@ -18,11 +18,13 @@ package daemon
import ( import (
"fmt" "fmt"
"sync"
"testing" "testing"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apiserver/pkg/storage/names" "k8s.io/apiserver/pkg/storage/names"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
@ -166,6 +168,63 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin
} }
} }
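// fakePodControl wraps controller.FakePodControl and mirrors created and
// deleted pods into a local pod store and ID map, so the update tests can
// mark pods ready and follow them across sync rounds.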
type fakePodControl struct {
sync.Mutex
*controller.FakePodControl
podStore cache.Store
podIDMap map[string]*v1.Pod
}
func newFakePodControl() *fakePodControl {
podIDMap := make(map[string]*v1.Pod)
return &fakePodControl{
FakePodControl: &controller.FakePodControl{},
podIDMap: podIDMap}
}
func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
f.Lock()
defer f.Unlock()
if err := f.FakePodControl.CreatePodsOnNode(nodeName, namespace, template, object); err != nil {
return fmt.Errorf("failed to create pod on node %q", nodeName)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: template.Labels,
Namespace: namespace,
GenerateName: fmt.Sprintf("%s-", nodeName),
},
}
if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil {
return fmt.Errorf("unable to convert pod template: %v", err)
}
if len(nodeName) != 0 {
pod.Spec.NodeName = nodeName
}
pod.Name = names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-", nodeName))
f.podStore.Update(pod)
f.podIDMap[pod.Name] = pod
return nil
}
func (f *fakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
f.Lock()
defer f.Unlock()
if err := f.FakePodControl.DeletePod(namespace, podID, object); err != nil {
return fmt.Errorf("failed to delete pod %q", podID)
}
pod, ok := f.podIDMap[podID]
if !ok {
return fmt.Errorf("pod %q does not exist", podID)
}
f.podStore.Delete(pod)
delete(f.podIDMap, podID)
return nil
}
type daemonSetsController struct { type daemonSetsController struct {
*DaemonSetsController *DaemonSetsController
@ -174,7 +233,7 @@ type daemonSetsController struct {
nodeStore cache.Store nodeStore cache.Store
} }
func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *controller.FakePodControl, *fake.Clientset) { func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *fakePodControl, *fake.Clientset) {
clientset := fake.NewSimpleClientset(initialObjects...) clientset := fake.NewSimpleClientset(initialObjects...)
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
@ -190,8 +249,9 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
manager.nodeStoreSynced = alwaysReady manager.nodeStoreSynced = alwaysReady
manager.dsStoreSynced = alwaysReady manager.dsStoreSynced = alwaysReady
podControl := &controller.FakePodControl{} podControl := newFakePodControl()
manager.podControl = podControl manager.podControl = podControl
podControl.podStore = informerFactory.Core().V1().Pods().Informer().GetStore()
return &daemonSetsController{ return &daemonSetsController{
manager, manager,
@ -201,7 +261,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
}, podControl, clientset }, podControl, clientset
} }
func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { func validateSyncDaemonSets(t *testing.T, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int) {
if len(fakePodControl.Templates) != expectedCreates { if len(fakePodControl.Templates) != expectedCreates {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates)) t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
} }
@ -210,7 +270,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont
} }
} }
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int) {
key, err := controller.KeyFunc(ds) key, err := controller.KeyFunc(ds)
if err != nil { if err != nil {
t.Errorf("Could not get key for daemon.") t.Errorf("Could not get key for daemon.")
@ -219,6 +279,18 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *
validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes) validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes)
} }
// clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) {
fakePodControl.Clear()
key, err := controller.KeyFunc(ds)
if err != nil {
t.Errorf("Could not get key for daemon.")
return
}
manager.expectations.DeleteExpectations(key)
}
func TestDeleteFinalStateUnknown(t *testing.T) { func TestDeleteFinalStateUnknown(t *testing.T) {
manager, _, _ := newTestController() manager, _, _ := newTestController()
addNodes(manager.nodeStore, 0, 1, nil) addNodes(manager.nodeStore, 0, 1, nil)
@ -231,6 +303,15 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
} }
} }
func markPodsReady(store cache.Store) {
// mark pods as ready
for _, obj := range store.List() {
pod := obj.(*v1.Pod)
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
v1.UpdatePodCondition(&pod.Status, &condition)
}
}
// DaemonSets without node selectors should launch pods on every node. // DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) { func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController() manager, podControl, _ := newTestController()
@ -927,3 +1008,114 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
} }
} }
} }
func TestDaemonSetUpdatesPods(t *testing.T) {
manager, podControl, _ := newTestController()
maxUnavailable := 2
addNodes(manager.nodeStore, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.TemplateGeneration++
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
manager, podControl, _ := newTestController()
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.TemplateGeneration++
manager.dsStore.Update(ds)
// new pods are not ready, so numUnavailable == maxUnavailable
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
manager, podControl, _ := newTestController()
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.TemplateGeneration++
manager.dsStore.Update(ds)
// all old pods are unavailable, so they should all be removed at once
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
manager, podControl, _ := newTestController()
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
// the template is unchanged, so no pod should be removed
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
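The create/delete counts asserted in TestDaemonSetUpdatesPods above follow directly from the maxUnavailable budget: each sync may delete at most maxUnavailable old pods (and only after the previous round's replacements have been marked ready), and the next sync recreates pods on the freed nodes. Below is a minimal standalone sketch of that cadence for 5 nodes and maxUnavailable=2; it is illustrative only and not part of the controller.

package main

import "fmt"

// Simulate the delete/create cadence the rolling update test expects: every
// sync deletes at most maxUnavailable old pods whose replacements are ready,
// and the following sync creates pods on the freed nodes.
func main() {
	const nodes = 5
	const maxUnavailable = 2

	oldPods := nodes // pods still running the old template
	updated := 0     // new-template pods that have become ready
	sync := 0

	for oldPods > 0 {
		toDelete := maxUnavailable
		if oldPods < toDelete {
			toDelete = oldPods
		}
		oldPods -= toDelete
		sync++
		fmt.Printf("sync %d: create 0, delete %d\n", sync, toDelete)

		sync++
		fmt.Printf("sync %d: create %d, delete 0\n", sync, toDelete)

		// markPodsReady in the test makes these replacements available,
		// which unblocks the next delete phase.
		updated += toDelete
	}
	fmt.Printf("final sync: create 0, delete 0 (%d pods updated)\n", updated)
}

Running it prints the same 0/2, 2/0, 0/2, 2/0, 0/1, 1/0, 0/0 sequence the test validates.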

View File

@@ -0,0 +1,137 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/daemon/util"
)
// rollingUpdate deletes old daemon set pods, making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable at any time.
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
newPods, oldPods, err := dsc.getAllDaemonSetPods(ds)
if err != nil {
return fmt.Errorf("couldn't get daemon set pods: %v", err)
}
allPods := append(oldPods, newPods...)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, allPods)
if err != nil {
return fmt.Errorf("couldn't get unavailable numbers: %v", err)
}
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
// delete all old pods that are already unavailable
var podsToDelete []string
glog.V(4).Infof("Marking all unavailable old pods for deletion")
for _, pod := range oldUnavailablePods {
// Skip terminating pods. We won't delete them again
if pod.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
podsToDelete = append(podsToDelete, pod.Name)
}
glog.V(4).Infof("Marking old pods for deletion")
for _, pod := range oldAvailablePods {
if numUnavailable >= maxUnavailable {
glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
break
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
podsToDelete = append(podsToDelete, pod.Name)
numUnavailable++
}
errors := dsc.syncNodes(ds, podsToDelete, []string{})
return utilerrors.NewAggregate(errors)
}
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet) ([]*v1.Pod, []*v1.Pod, error) {
var newPods []*v1.Pod
var oldPods []*v1.Pod
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return newPods, oldPods, err
}
daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
if err != nil {
return newPods, oldPods, fmt.Errorf("Couldn't get list of pods for daemon set %#v: %v", ds, err)
}
for _, pod := range daemonPods {
if util.IsPodUpdated(ds.TemplateGeneration, pod) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
}
}
return newPods, oldPods, nil
}
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, allPods []*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
}
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return -1, -1, fmt.Errorf("couldn't get node to daemon pods mapping for daemon set %#v: %v", ds, err)
}
var numUnavailable, desiredNumberScheduled int
for i := range nodeList {
node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return -1, -1, err
}
if !wantToRun {
continue
}
desiredNumberScheduled++
daemonPods, exists := nodeToDaemonPods[node.Name]
if !exists {
numUnavailable++
continue
}
available := false
for _, pod := range daemonPods {
if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
available = true
break
}
}
if !available {
numUnavailable++
}
}
maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
if err != nil {
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
}
return maxUnavailable, numUnavailable, nil
}
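getUnavailableNumbers resolves MaxUnavailable against desiredNumberScheduled, i.e. the number of nodes that currently want to run the daemon pod, so a percentage budget scales with cluster size. A minimal sketch of that resolution using the same intstr helper the controller calls; the concrete values are illustrative.

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desiredNumberScheduled := 5 // nodes that should run the daemon pod

	absolute := intstrutil.FromInt(2)           // used as-is
	percentage := intstrutil.FromString("30%")  // resolved against desiredNumberScheduled, rounded up

	for _, maxUnavailable := range []intstrutil.IntOrString{absolute, percentage} {
		value, err := intstrutil.GetValueFromIntOrPercent(&maxUnavailable, desiredNumberScheduled, true)
		if err != nil {
			panic(err)
		}
		fmt.Printf("MaxUnavailable %s allows %d unavailable pods\n", maxUnavailable.String(), value)
	}
}

With 5 desired pods, 2 resolves to 2 and "30%" rounds up to 2, which is the number the delete loop above treats as its budget.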

View File

@@ -0,0 +1,35 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"testing"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)
// A simple DaemonSet should launch pods on every node and accept switching its update strategy to RollingUpdate.
func TestSimpleDaemonSetUpdatesWithRollingUpdate(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
// change strategy type to RollingUpdate
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
}

View File

@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["daemonset_util.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/util/labels:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,62 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
// GetPodTemplateWithGeneration returns a copy of the provided template with an
// additional label that contains the provided template generation
func GetPodTemplateWithGeneration(template v1.PodTemplateSpec, generation int64) v1.PodTemplateSpec {
obj, _ := api.Scheme.DeepCopy(template)
newTemplate := obj.(v1.PodTemplateSpec)
templateGenerationStr := fmt.Sprint(generation)
newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
template.ObjectMeta.Labels,
extensions.DaemonSetTemplateGenerationKey,
templateGenerationStr,
)
return newTemplate
}
// IsPodUpdated checks whether the pod carries the template generation label matching the provided DaemonSet template generation
func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod) bool {
podTemplateGeneration, generationExists := pod.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
dsTemplateGenerationStr := fmt.Sprint(dsTemplateGeneration)
return generationExists && podTemplateGeneration == dsTemplateGenerationStr
}
// SplitByAvailablePods splits the provided daemon set pods by availability
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
unavailablePods := []*v1.Pod{}
availablePods := []*v1.Pod{}
for _, pod := range pods {
if v1.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
availablePods = append(availablePods, pod)
} else {
unavailablePods = append(unavailablePods, pod)
}
}
return availablePods, unavailablePods
}
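The old/new split above hinges entirely on the template generation label keyed by extensions.DaemonSetTemplateGenerationKey: a pod counts as updated only if it carries that label and the value matches the DaemonSet's current TemplateGeneration. A simplified standalone sketch of that comparison, using a plain label map instead of the full Pod type; the literal key name is only a stand-in for the real constant.

package main

import "fmt"

// isPodUpdated mirrors the comparison in IsPodUpdated: the pod must carry the
// template generation label and its value must equal the DaemonSet's current
// TemplateGeneration, formatted as a string.
func isPodUpdated(dsTemplateGeneration int64, podLabels map[string]string, key string) bool {
	gen, exists := podLabels[key]
	return exists && gen == fmt.Sprint(dsTemplateGeneration)
}

func main() {
	// Stand-in for extensions.DaemonSetTemplateGenerationKey.
	const key = "pod-template-generation"

	cases := map[string]map[string]string{
		"current":   {key: "3"},
		"stale":     {key: "2"},
		"unlabeled": {},
	}
	for name, labels := range cases {
		fmt.Printf("%s pod updated: %t\n", name, isPodUpdated(3, labels, key))
	}
}

Only the "current" pod reports true, which is exactly how rollingUpdate decides which pods still need to be replaced.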

View File

@@ -0,0 +1,87 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
)
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: metav1.ObjectMeta{
Labels: label,
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
}
pod.Name = podName
return pod
}
func TestIsPodUpdated(t *testing.T) {
tests := []struct {
templateGeneration int64
pod *v1.Pod
isUpdated bool
}{
{
int64(12345),
newPod("pod1", "node1", map[string]string{extensions.DaemonSetTemplateGenerationKey: "12345"}),
true,
},
{
int64(12355),
newPod("pod1", "node1", map[string]string{extensions.DaemonSetTemplateGenerationKey: "12345"}),
false,
},
{
int64(12355),
newPod("pod1", "node1", map[string]string{}),
false,
},
}
for _, test := range tests {
updated := IsPodUpdated(test.templateGeneration, test.pod)
if updated != test.isUpdated {
t.Errorf("IsPodUpdated returned wrong value. Expected %t, got %t. TemplateGeneration: %d", test.isUpdated, updated, test.templateGeneration)
}
}
}
func TestGetPodTemplateWithGeneration(t *testing.T) {
generation := int64(1)
podTemplateSpec := v1.PodTemplateSpec{}
newPodTemplate := GetPodTemplateWithGeneration(podTemplateSpec, generation)
label, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
if !exists || label != fmt.Sprint(generation) {
t.Errorf("Error in getting podTemplateSpec with label geneartion. Exists: %t, label: %s", exists, label)
}
}

View File

@@ -44,6 +44,8 @@ func GetInternalPodTemplateSpecHash(template api.PodTemplateSpec) uint32 {
return podTemplateSpecHasher.Sum32()
}
// TODO: move it to a better place. It's also used by DaemonSets
// Maybe to pkg/api/v1/resource_helpers.go
func GetPodTemplateSpecHashFnv(template v1.PodTemplateSpec) uint32 {
podTemplateSpecHasher := fnv.New32a()
hashutil.DeepHashObject(podTemplateSpecHasher, template)

View File

@@ -49,6 +49,9 @@ func newValidDaemonSet() *extensions.DaemonSet {
Namespace: metav1.NamespaceDefault,
},
Spec: extensions.DaemonSetSpec{
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
},
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}},
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -53,6 +53,7 @@ func (daemonSetStrategy) PrepareForCreate(ctx genericapirequest.Context, obj run
daemonSet.Status = extensions.DaemonSetStatus{}
daemonSet.Generation = 1
daemonSet.TemplateGeneration = 1
}
// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
@@ -63,6 +64,9 @@ func (daemonSetStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, ol
// update is not allowed to set status
newDaemonSet.Status = oldDaemonSet.Status
// update is not allowed to set TemplateGeneration
newDaemonSet.TemplateGeneration = oldDaemonSet.TemplateGeneration
// Any changes to the spec increment the generation number, any changes to the
// status should reflect the generation number of the corresponding object. We push
// the burden of managing the status onto the clients because we can't (in general)
@@ -74,6 +78,11 @@ func (daemonSetStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, ol
//
// TODO: Any changes to a part of the object that represents desired state (labels,
// annotations etc) should also increment the generation.
if !reflect.DeepEqual(oldDaemonSet.Spec.Template, newDaemonSet.Spec.Template) {
newDaemonSet.TemplateGeneration = oldDaemonSet.TemplateGeneration + 1
newDaemonSet.Generation = oldDaemonSet.Generation + 1
return
}
if !reflect.DeepEqual(oldDaemonSet.Spec, newDaemonSet.Spec) {
newDaemonSet.Generation = oldDaemonSet.Generation + 1
}
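The ordering of the two checks in PrepareForUpdate is what gives TemplateGeneration its meaning: a template change bumps both TemplateGeneration and Generation and returns early, while any other spec change bumps only Generation. A condensed standalone sketch of that rule, with the types and fields trimmed down for illustration:

package main

import (
	"fmt"
	"reflect"
)

// daemonSet is a pared-down stand-in for the real DaemonSet type, keeping only
// the fields the bump rule looks at.
type daemonSet struct {
	Generation         int64
	TemplateGeneration int64
	Template           string // stands in for Spec.Template
	OtherSpec          string // stands in for the rest of Spec
}

// prepareForUpdate mirrors the strategy's bump rule: clients may not set
// TemplateGeneration, template changes advance both counters, and other spec
// changes advance only Generation.
func prepareForUpdate(oldDS, newDS *daemonSet) {
	newDS.TemplateGeneration = oldDS.TemplateGeneration
	if !reflect.DeepEqual(oldDS.Template, newDS.Template) {
		newDS.TemplateGeneration = oldDS.TemplateGeneration + 1
		newDS.Generation = oldDS.Generation + 1
		return
	}
	if !reflect.DeepEqual(oldDS.OtherSpec, newDS.OtherSpec) {
		newDS.Generation = oldDS.Generation + 1
	}
}

func main() {
	oldDS := daemonSet{Generation: 4, TemplateGeneration: 2, Template: "image:v1", OtherSpec: "a"}

	templateChange := oldDS
	templateChange.Template = "image:v2"
	prepareForUpdate(&oldDS, &templateChange)
	fmt.Printf("template change: generation=%d templateGeneration=%d\n",
		templateChange.Generation, templateChange.TemplateGeneration)

	specOnlyChange := oldDS
	specOnlyChange.OtherSpec = "b"
	prepareForUpdate(&oldDS, &specOnlyChange)
	fmt.Printf("spec-only change: generation=%d templateGeneration=%d\n",
		specOnlyChange.Generation, specOnlyChange.TemplateGeneration)
}

Only the template change advances TemplateGeneration, which is the value the controller stamps onto new pods and compares in IsPodUpdated.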

View File

@@ -91,6 +91,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
f = framework.NewDefaultFramework("daemonsets")
image := "gcr.io/google_containers/serve_hostname:v1.4"
redisImage := "gcr.io/google_containers/redis:e2e"
dsName := "daemon-set" dsName := "daemon-set"
var ns string var ns string
@ -231,6 +232,64 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label)) err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
}) })
It("Should not update pod when spec was updated and update strategy is on delete", func() {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods launch on every node of the cluster.")
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
By("Update daemon pods image.")
ds, err := c.Extensions().DaemonSets(ns).Get(dsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ds.Spec.Template.Spec.Containers[0].Image = redisImage
_, err = c.Extensions().DaemonSets(ns).Update(ds)
Expect(err).NotTo(HaveOccurred())
By("Check that demon pods have not set updated image.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImage(c, ns, label, image))
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods are still running on every node of the cluster.")
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
})
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods launch on every node of the cluster.")
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
By("Update daemon pods image.")
ds, err := c.Extensions().DaemonSets(ns).Get(dsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ds.Spec.Template.Spec.Containers[0].Image = redisImage
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
_, err = c.Extensions().DaemonSets(ns).Update(ds)
Expect(err).NotTo(HaveOccurred())
By("Check that demon pods have not set updated image.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImage(c, ns, label, redisImage))
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods are still running on every node of the cluster.")
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
})
})
func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
@@ -389,3 +448,24 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
}
return nil
}
func checkDaemonPodsImage(c clientset.Interface, ns string, selector map[string]string, image string) func() (bool, error) {
return func() (bool, error) {
selector := labels.Set(selector).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
pods := podList.Items
for _, pod := range pods {
podImage := pod.Spec.Containers[0].Image
if podImage != image || !v1.IsPodReady(&pod) {
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s. Pod Ready: %t", pod.Name, image, podImage, v1.IsPodReady(&pod))
return false, nil
}
}
return true, nil
}
}
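For reference, the RollingUpdate strategy the e2e test switches on can also carry an explicit budget. A hedged sketch of constructing it with a percentage MaxUnavailable, using the extensions/v1beta1 types and intstr helpers already used above; client wiring is omitted, the "10%" value is illustrative, and this only builds inside the Kubernetes tree of that era.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	// Allow at most 10% of daemon pods to be unavailable during the rollout.
	maxUnavailable := intstr.FromString("10%")
	strategy := extensions.DaemonSetUpdateStrategy{
		Type: extensions.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &extensions.RollingUpdateDaemonSet{
			MaxUnavailable: &maxUnavailable,
		},
	}
	fmt.Printf("update strategy: %+v\n", strategy)
}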