diff --git a/cluster/addons/calico-policy-controller/calico-etcd-petset.yaml b/cluster/addons/calico-policy-controller/calico-etcd-petset.yaml index 33cefee377b..b4d49ed641b 100644 --- a/cluster/addons/calico-policy-controller/calico-etcd-petset.yaml +++ b/cluster/addons/calico-policy-controller/calico-etcd-petset.yaml @@ -1,5 +1,5 @@ apiVersion: "apps/v1alpha1" -kind: PetSet +kind: StatefulSet metadata: name: calico-etcd namespace: kube-system diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index d532844bfba..e4980809181 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -440,15 +440,15 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl groupVersion = "apps/v1alpha1" resources, found = resourceMap[groupVersion] - glog.Infof("Attempting to start petset, full resource map %+v", resourceMap) + glog.Infof("Attempting to start statefulset, full resource map %+v", resourceMap) if containsVersion(versions, groupVersion) && found { glog.Infof("Starting %s apis", groupVersion) - if containsResource(resources, "petsets") { - glog.Infof("Starting PetSet controller") + if containsResource(resources, "statefulsets") { + glog.Infof("Starting StatefulSet controller") resyncPeriod := ResyncPeriod(s)() - go petset.NewPetSetController( + go petset.NewStatefulSetController( sharedInformers.Pods().Informer(), - client("petset-controller"), + client("statefulset-controller"), resyncPeriod, ).Run(1, wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) diff --git a/examples/examples_test.go b/examples/examples_test.go index 1127c8d13fe..c03b29f1ac2 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -130,11 +130,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { t.Namespace = api.NamespaceDefault } errors = expvalidation.ValidateDaemonSet(t) - case *apps.PetSet: + case *apps.StatefulSet: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = appsvalidation.ValidatePetSet(t) + errors = appsvalidation.ValidateStatefulSet(t) default: errors = field.ErrorList{} errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj))) @@ -221,7 +221,7 @@ func TestExampleObjectSchemas(t *testing.T) { "cassandra-daemonset": &extensions.DaemonSet{}, "cassandra-controller": &api.ReplicationController{}, "cassandra-service": &api.Service{}, - "cassandra-petset": &apps.PetSet{}, + "cassandra-petset": &apps.StatefulSet{}, }, "../examples/cluster-dns": { "dns-backend-rc": &api.ReplicationController{}, diff --git a/examples/storage/cassandra/cassandra-petset.yaml b/examples/storage/cassandra/cassandra-petset.yaml index 9227e0ddf74..6c1e5b1b75a 100644 --- a/examples/storage/cassandra/cassandra-petset.yaml +++ b/examples/storage/cassandra/cassandra-petset.yaml @@ -1,5 +1,5 @@ apiVersion: "apps/v1alpha1" -kind: PetSet +kind: StatefulSet metadata: name: cassandra spec: diff --git a/hack/make-rules/test-cmd.sh b/hack/make-rules/test-cmd.sh index dab33477e7e..46e4b25ca3c 100755 --- a/hack/make-rules/test-cmd.sh +++ b/hack/make-rules/test-cmd.sh @@ -334,7 +334,7 @@ runTests() { hpa_min_field=".spec.minReplicas" hpa_max_field=".spec.maxReplicas" hpa_cpu_field=".spec.targetCPUUtilizationPercentage" - petset_replicas_field=".spec.replicas" + statefulset_replicas_field=".spec.replicas" 
job_parallelism_field=".spec.parallelism" deployment_replicas=".spec.replicas" secret_data=".data" @@ -1185,7 +1185,7 @@ __EOF__ kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK" - kube::test::if_has_string "${output_message}" "/apis/apps/v1alpha1/namespaces/default/petsets 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1alpha1/namespaces/default/statefulsets 200 OK" kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200" kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK" kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK" @@ -2396,25 +2396,25 @@ __EOF__ - ############ - # Pet Sets # - ############ + ################# + # Stateful Sets # + ################# - kube::log::status "Testing kubectl(${version}:petsets)" + kube::log::status "Testing kubectl(${version}:statefulsets)" - ### Create and stop petset, make sure it doesn't leak pods - # Pre-condition: no petset exists - kube::test::get_object_assert petset "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: create petset + ### Create and stop statefulset, make sure it doesn't leak pods + # Pre-condition: no statefulset exists + kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" '' + # Command: create statefulset kubectl create -f hack/testdata/nginx-petset.yaml "${kube_flags[@]}" - ### Scale petset test with current-replicas and replicas + ### Scale statefulset test with current-replicas and replicas # Pre-condition: 0 replicas - kube::test::get_object_assert 'petset nginx' "{{$petset_replicas_field}}" '0' + kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0' # Command: Scale up - kubectl scale --current-replicas=0 --replicas=1 petset nginx "${kube_flags[@]}" + kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}" # Post-condition: 1 replica, named nginx-0 - kube::test::get_object_assert 'petset nginx' "{{$petset_replicas_field}}" '1' + kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1' # Typically we'd wait and confirm that N>1 replicas are up, but this framework # doesn't start the scheduler, so pet-0 will block all others. # TODO: test robust scaling in an e2e. 
@@ -2422,7 +2422,7 @@ __EOF__ ### Clean up kubectl delete -f hack/testdata/nginx-petset.yaml "${kube_flags[@]}" - # Post-condition: no pods from petset controller + # Post-condition: no pods from statefulset controller wait-for-pods-with-label "app=nginx-petset" "" diff --git a/hack/testdata/nginx-petset.yaml b/hack/testdata/nginx-petset.yaml index 30b4b6dca07..776f1d423cf 100644 --- a/hack/testdata/nginx-petset.yaml +++ b/hack/testdata/nginx-petset.yaml @@ -17,7 +17,7 @@ spec: app: nginx-petset --- apiVersion: apps/v1alpha1 -kind: PetSet +kind: StatefulSet metadata: name: nginx spec: diff --git a/pkg/api/defaulting_test.go b/pkg/api/defaulting_test.go index 6988b68a243..bd1cc096a55 100644 --- a/pkg/api/defaulting_test.go +++ b/pkg/api/defaulting_test.go @@ -77,8 +77,8 @@ func TestDefaulting(t *testing.T) { {Group: "", Version: "v1", Kind: "SecretList"}: {}, {Group: "", Version: "v1", Kind: "Service"}: {}, {Group: "", Version: "v1", Kind: "ServiceList"}: {}, - {Group: "apps", Version: "v1alpha1", Kind: "PetSet"}: {}, - {Group: "apps", Version: "v1alpha1", Kind: "PetSetList"}: {}, + {Group: "apps", Version: "v1alpha1", Kind: "StatefulSet"}: {}, + {Group: "apps", Version: "v1alpha1", Kind: "StatefulSetList"}: {}, {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}: {}, {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscalerList"}: {}, {Group: "batch", Version: "v1", Kind: "Job"}: {}, diff --git a/pkg/api/unversioned/group_version_test.go b/pkg/api/unversioned/group_version_test.go index f82d37a0059..510a8a460e7 100644 --- a/pkg/api/unversioned/group_version_test.go +++ b/pkg/api/unversioned/group_version_test.go @@ -175,7 +175,7 @@ func TestKindForGroupVersionKinds(t *testing.T) { ok: true, }, { - input: []GroupVersionKind{{Group: "apps", Version: "v1alpha1", Kind: "PetSet"}}, + input: []GroupVersionKind{{Group: "apps", Version: "v1alpha1", Kind: "StatefulSet"}}, target: GroupVersionKind{}, ok: false, }, diff --git a/pkg/apis/apps/register.go b/pkg/apis/apps/register.go index 9864ffabc04..41d929ad602 100644 --- a/pkg/apis/apps/register.go +++ b/pkg/apis/apps/register.go @@ -47,8 +47,8 @@ func Resource(resource string) unversioned.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &PetSet{}, - &PetSetList{}, + &StatefulSet{}, + &StatefulSetList{}, &api.ListOptions{}, &api.DeleteOptions{}, ) diff --git a/pkg/apis/apps/types.go b/pkg/apis/apps/types.go index 223d480cd2b..841cc5a16c0 100644 --- a/pkg/apis/apps/types.go +++ b/pkg/apis/apps/types.go @@ -23,30 +23,30 @@ import ( // +genclient=true -// PetSet represents a set of pods with consistent identities. +// StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. // - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha and +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. StatefulSet is currently in alpha and // and subject to change without notice. -type PetSet struct { +type StatefulSet struct { unversioned.TypeMeta `json:",inline"` // +optional api.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the desired identities of pets in this set. + // Spec defines the desired identities of pods in this set. 
// +optional - Spec PetSetSpec `json:"spec,omitempty"` + Spec StatefulSetSpec `json:"spec,omitempty"` - // Status is the current status of Pets in this PetSet. This data + // Status is the current status of Pods in this StatefulSet. This data // may be out of date by some window of time. // +optional - Status PetSetStatus `json:"status,omitempty"` + Status StatefulSetStatus `json:"status,omitempty"` } -// A PetSetSpec is the specification of a PetSet. -type PetSetSpec struct { +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { // Replicas is the desired number of replicas of the given Template. // These are replicas in the sense that they are instantiations of the // same Template, but individual replicas also have a consistent identity. @@ -62,14 +62,14 @@ type PetSetSpec struct { Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet + // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. + // of the StatefulSet. Template api.PodTemplateSpec `json:"template"` - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one // container in the template. A claim in this list takes precedence over // any volumes in the template, with the same name. @@ -77,16 +77,16 @@ type PetSetSpec struct { // +optional VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. ServiceName string `json:"serviceName"` } -// PetSetStatus represents the current state of a PetSet. -type PetSetStatus struct { +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { // most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -95,10 +95,10 @@ type PetSetStatus struct { Replicas int32 `json:"replicas"` } -// PetSetList is a collection of PetSets. -type PetSetList struct { +// StatefulSetList is a collection of StatefulSets. 
+type StatefulSetList struct { unversioned.TypeMeta `json:",inline"` // +optional unversioned.ListMeta `json:"metadata,omitempty"` - Items []PetSet `json:"items"` + Items []StatefulSet `json:"items"` } diff --git a/pkg/apis/apps/v1alpha1/conversion.go b/pkg/apis/apps/v1alpha1/conversion.go index 54a70a8b154..b8248147a6d 100644 --- a/pkg/apis/apps/v1alpha1/conversion.go +++ b/pkg/apis/apps/v1alpha1/conversion.go @@ -33,14 +33,14 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // it, but a plain int32 is more convenient in the internal type. These // functions are the same as the autogenerated ones in every other way. err := scheme.AddConversionFuncs( - Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, - Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, + Convert_v1alpha1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1alpha1_StatefulSetSpec, ) if err != nil { return err } - return api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "PetSet", + return api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "StatefulSet", func(label, value string) (string, string, error) { switch label { case "metadata.name", "metadata.namespace", "status.successful": @@ -52,7 +52,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { ) } -func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.PetSetSpec, s conversion.Scope) error { +func Convert_v1alpha1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -83,7 +83,7 @@ func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.Pe return nil } -func Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(in *apps.PetSetSpec, out *PetSetSpec, s conversion.Scope) error { +func Convert_apps_StatefulSetSpec_To_v1alpha1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = in.Replicas if in.Selector != nil { diff --git a/pkg/apis/apps/v1alpha1/defaults.go b/pkg/apis/apps/v1alpha1/defaults.go index 7658c644327..ec956f6ee9d 100644 --- a/pkg/apis/apps/v1alpha1/defaults.go +++ b/pkg/apis/apps/v1alpha1/defaults.go @@ -24,11 +24,11 @@ import ( func addDefaultingFuncs(scheme *runtime.Scheme) error { RegisterDefaults(scheme) return scheme.AddDefaultingFuncs( - SetDefaults_PetSet, + SetDefaults_StatefulSet, ) } -func SetDefaults_PetSet(obj *PetSet) { +func SetDefaults_StatefulSet(obj *StatefulSet) { labels := obj.Spec.Template.Labels if labels != nil { if obj.Spec.Selector == nil { diff --git a/pkg/apis/apps/v1alpha1/register.go b/pkg/apis/apps/v1alpha1/register.go index 90f9bd1c24b..33d443f998e 100644 --- a/pkg/apis/apps/v1alpha1/register.go +++ b/pkg/apis/apps/v1alpha1/register.go @@ -37,8 +37,8 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &PetSet{}, - &PetSetList{}, + &StatefulSet{}, + &StatefulSetList{}, &v1.ListOptions{}, &v1.DeleteOptions{}, ) @@ -46,5 +46,5 @@ func addKnownTypes(scheme *runtime.Scheme) error { return nil } -func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *StatefulSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *StatefulSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/pkg/apis/apps/v1alpha1/types.go b/pkg/apis/apps/v1alpha1/types.go index d4bd62ce75a..7e8a56be2fe 100644 --- a/pkg/apis/apps/v1alpha1/types.go +++ b/pkg/apis/apps/v1alpha1/types.go @@ -23,30 +23,30 @@ import ( // +genclient=true -// PetSet represents a set of pods with consistent identities. +// StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. // - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. StatefulSet is currently in alpha // and subject to change without notice. -type PetSet struct { +type StatefulSet struct { unversioned.TypeMeta `json:",inline"` // +optional v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired identities of pets in this set. + // Spec defines the desired identities of pods in this set. // +optional - Spec PetSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current status of Pets in this PetSet. This data + // Status is the current status of Pods in this StatefulSet. This data // may be out of date by some window of time. // +optional - Status PetSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// A PetSetSpec is the specification of a PetSet. -type PetSetSpec struct { +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { // Replicas is the desired number of replicas of the given Template. // These are replicas in the sense that they are instantiations of the // same Template, but individual replicas also have a consistent identity. @@ -62,14 +62,14 @@ type PetSetSpec struct { Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet + // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. + // of the StatefulSet. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. 
Every claim in + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one // container in the template. A claim in this list takes precedence over // any volumes in the template, with the same name. @@ -77,16 +77,16 @@ type PetSetSpec struct { // +optional VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` } -// PetSetStatus represents the current state of a PetSet. -type PetSetStatus struct { +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { // most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -95,10 +95,10 @@ type PetSetStatus struct { Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` } -// PetSetList is a collection of PetSets. -type PetSetList struct { +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { unversioned.TypeMeta `json:",inline"` // +optional unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PetSet `json:"items" protobuf:"bytes,2,rep,name=items"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/pkg/apis/apps/validation/validation.go b/pkg/apis/apps/validation/validation.go index 67203dbe46b..cea8e2a3ad1 100644 --- a/pkg/apis/apps/validation/validation.go +++ b/pkg/apis/apps/validation/validation.go @@ -28,24 +28,24 @@ import ( "k8s.io/kubernetes/pkg/util/validation/field" ) -// ValidatePetSetName can be used to check whether the given PetSet name is valid. +// ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidatePetSetName(name string, prefix bool) []string { - // TODO: Validate that there's name for the suffix inserted by the pets. +func ValidateStatefulSetName(name string, prefix bool) []string { + // TODO: Validate that there's room for the suffix inserted by the pods. // Currently this is just "-index". In the future we may allow a user // specified list of suffixes and we need to validate the longest one. return apivalidation.NameIsDNSSubdomain(name, prefix) } // Validates the given template and ensures that it is in accordance with the desired selector.
-func ValidatePodTemplateSpecForPetSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) } else { if !selector.Empty() { - // Verify that the PetSet selector matches the labels in template. + // Verify that the StatefulSet selector matches the labels in template. labels := labels.Set(template.Labels) if !selector.Matches(labels) { allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) @@ -62,8 +62,8 @@ func ValidatePodTemplateSpecForPetSet(template *api.PodTemplateSpec, selector la return allErrs } -// ValidatePetSetSpec tests if required fields in the PetSet spec are set. -func ValidatePetSetSpec(spec *apps.PetSetSpec, fldPath *field.Path) field.ErrorList { +// ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set. +func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) @@ -72,7 +72,7 @@ func ValidatePetSetSpec(spec *apps.PetSetSpec, fldPath *field.Path) field.ErrorL } else { allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for petset.")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for statefulset.")) } } @@ -80,7 +80,7 @@ func ValidatePetSetSpec(spec *apps.PetSetSpec, fldPath *field.Path) field.ErrorL if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "")) } else { - allErrs = append(allErrs, ValidatePodTemplateSpecForPetSet(&spec.Template, selector, fldPath.Child("template"))...) + allErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(&spec.Template, selector, fldPath.Child("template"))...) } if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { @@ -90,42 +90,42 @@ func ValidatePetSetSpec(spec *apps.PetSetSpec, fldPath *field.Path) field.ErrorL return allErrs } -// ValidatePetSet validates a PetSet. -func ValidatePetSet(petSet *apps.PetSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&petSet.ObjectMeta, true, ValidatePetSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePetSetSpec(&petSet.Spec, field.NewPath("spec"))...) +// ValidateStatefulSet validates a StatefulSet. +func ValidateStatefulSet(statefulSet *apps.StatefulSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"))...) return allErrs } -// ValidatePetSetUpdate tests if required fields in the PetSet are set. 
-func ValidatePetSetUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&petSet.ObjectMeta, &oldPetSet.ObjectMeta, field.NewPath("metadata")) +// ValidateStatefulSetUpdate tests if required fields in the StatefulSet are set. +func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata")) // TODO: For now we're taking the safe route and disallowing all updates to // spec except for Replicas, for scaling, and Template.Spec.containers.image // for rolling-update. Enable others on a case by case basis. - restoreReplicas := petSet.Spec.Replicas - petSet.Spec.Replicas = oldPetSet.Spec.Replicas + restoreReplicas := statefulSet.Spec.Replicas + statefulSet.Spec.Replicas = oldStatefulSet.Spec.Replicas - restoreContainers := petSet.Spec.Template.Spec.Containers - petSet.Spec.Template.Spec.Containers = oldPetSet.Spec.Template.Spec.Containers + restoreContainers := statefulSet.Spec.Template.Spec.Containers + statefulSet.Spec.Template.Spec.Containers = oldStatefulSet.Spec.Template.Spec.Containers - if !reflect.DeepEqual(petSet.Spec, oldPetSet.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to petset spec for fields other than 'replicas' are forbidden.")) + if !reflect.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas' are forbidden.")) } - petSet.Spec.Replicas = restoreReplicas - petSet.Spec.Template.Spec.Containers = restoreContainers + statefulSet.Spec.Replicas = restoreReplicas + statefulSet.Spec.Template.Spec.Containers = restoreContainers - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(petSet.Spec.Replicas), field.NewPath("spec", "replicas"))...) - containerErrs, _ := apivalidation.ValidateContainerUpdates(petSet.Spec.Template.Spec.Containers, oldPetSet.Spec.Template.Spec.Containers, field.NewPath("spec").Child("template").Child("containers")) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(statefulSet.Spec.Replicas), field.NewPath("spec", "replicas"))...) + containerErrs, _ := apivalidation.ValidateContainerUpdates(statefulSet.Spec.Template.Spec.Containers, oldStatefulSet.Spec.Template.Spec.Containers, field.NewPath("spec").Child("template").Child("containers")) allErrs = append(allErrs, containerErrs...) return allErrs } -// ValidatePetSetStatusUpdate tests if required fields in the PetSet are set. -func ValidatePetSetStatusUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList { +// ValidateStatefulSetStatusUpdate tests if required fields in the StatefulSet are set. +func ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&petSet.ObjectMeta, &oldPetSet.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))...) // TODO: Validate status. 
return allErrs } diff --git a/pkg/apis/apps/validation/validation_test.go b/pkg/apis/apps/validation/validation_test.go index 0a0e6f9e818..2fcd5ac4608 100644 --- a/pkg/apis/apps/validation/validation_test.go +++ b/pkg/apis/apps/validation/validation_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" ) -func TestValidatePetSet(t *testing.T) { +func TestValidateStatefulSet(t *testing.T) { validLabels := map[string]string{"a": "b"} validPodTemplate := api.PodTemplate{ Template: api.PodTemplateSpec{ @@ -51,65 +51,65 @@ func TestValidatePetSet(t *testing.T) { }, }, } - successCases := []apps.PetSet{ + successCases := []apps.StatefulSet{ { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, { ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, } for _, successCase := range successCases { - if errs := ValidatePetSet(&successCase); len(errs) != 0 { + if errs := ValidateStatefulSet(&successCase); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } - errorCases := map[string]apps.PetSet{ + errorCases := map[string]apps.StatefulSet{ "zero-length ID": { ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, "missing-namespace": { ObjectMeta: api.ObjectMeta{Name: "abc-123"}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, "empty selector": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Template: validPodTemplate.Template, }, }, "selector_doesnt_match": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: validPodTemplate.Template, }, }, "invalid manifest": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, }, }, "negative_replicas": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: -1, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, }, @@ -122,7 +122,7 @@ func TestValidatePetSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, @@ -135,7 +135,7 @@ func TestValidatePetSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Template: invalidPodTemplate.Template, }, }, @@ -147,7 +147,7 @@ func TestValidatePetSet(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: 
validLabels}, Template: validPodTemplate.Template, }, @@ -157,7 +157,7 @@ func TestValidatePetSet(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -176,7 +176,7 @@ func TestValidatePetSet(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -192,7 +192,7 @@ func TestValidatePetSet(t *testing.T) { }, } for k, v := range errorCases { - errs := ValidatePetSet(&v) + errs := ValidateStatefulSet(&v) if len(errs) == 0 { t.Errorf("expected failure for %s", k) } @@ -215,7 +215,7 @@ func TestValidatePetSet(t *testing.T) { } } -func TestValidatePetSetUpdate(t *testing.T) { +func TestValidateStatefulSetUpdate(t *testing.T) { validLabels := map[string]string{"a": "b"} validPodTemplate := api.PodTemplate{ Template: api.PodTemplateSpec{ @@ -255,21 +255,21 @@ func TestValidatePetSetUpdate(t *testing.T) { }, } type psUpdateTest struct { - old apps.PetSet - update apps.PetSet + old apps.StatefulSet + update apps.StatefulSet } successCases := []psUpdateTest{ { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 3, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -280,22 +280,22 @@ func TestValidatePetSetUpdate(t *testing.T) { for _, successCase := range successCases { successCase.old.ObjectMeta.ResourceVersion = "1" successCase.update.ObjectMeta.ResourceVersion = "1" - if errs := ValidatePetSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + if errs := ValidateStatefulSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } errorCases := map[string]psUpdateTest{ "more than one read/write": { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: readWriteVolumePodTemplate.Template, @@ -303,16 +303,16 @@ func TestValidatePetSetUpdate(t *testing.T) { }, }, "updates to a field other than spec.Replicas": { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: 
apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 1, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: readWriteVolumePodTemplate.Template, @@ -320,16 +320,16 @@ func TestValidatePetSetUpdate(t *testing.T) { }, }, "invalid selector": { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &unversioned.LabelSelector{MatchLabels: invalidLabels}, Template: validPodTemplate.Template, @@ -337,16 +337,16 @@ func TestValidatePetSetUpdate(t *testing.T) { }, }, "invalid pod": { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 2, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: invalidPodTemplate.Template, @@ -354,16 +354,16 @@ func TestValidatePetSetUpdate(t *testing.T) { }, }, "negative replicas": { - old: apps.PetSet{ + old: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, }, }, - update: apps.PetSet{ + update: apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: -1, Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, Template: validPodTemplate.Template, @@ -372,7 +372,7 @@ func TestValidatePetSetUpdate(t *testing.T) { }, } for testName, errorCase := range errorCases { - if errs := ValidatePetSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + if errs := ValidateStatefulSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { t.Errorf("expected failure: %s", testName) } } diff --git a/pkg/client/cache/listers.go b/pkg/client/cache/listers.go index 82c61e5fb05..7bc5706e3a5 100644 --- a/pkg/client/cache/listers.go +++ b/pkg/client/cache/listers.go @@ -364,13 +364,13 @@ func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVo return o.(*api.PersistentVolume), nil } -// StoreToPetSetLister gives a store List and Exists methods. The store must contain only PetSets. -type StoreToPetSetLister struct { +// StoreToStatefulSetLister gives a store List and Exists methods. The store must contain only StatefulSets. +type StoreToStatefulSetLister struct { Store } -// Exists checks if the given PetSet exists in the store. -func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) { +// Exists checks if the given StatefulSet exists in the store. 
+func (s *StoreToStatefulSetLister) Exists(ps *apps.StatefulSet) (bool, error) { _, exists, err := s.Store.Get(ps) if err != nil { return false, err @@ -378,35 +378,35 @@ func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) { return exists, nil } -// List lists all PetSets in the store. -func (s *StoreToPetSetLister) List() (psList []apps.PetSet, err error) { +// List lists all StatefulSets in the store. +func (s *StoreToStatefulSetLister) List() (psList []apps.StatefulSet, err error) { for _, ps := range s.Store.List() { - psList = append(psList, *(ps.(*apps.PetSet))) + psList = append(psList, *(ps.(*apps.StatefulSet))) } return psList, nil } -type storePetSetsNamespacer struct { +type storeStatefulSetsNamespacer struct { store Store namespace string } -func (s *StoreToPetSetLister) PetSets(namespace string) storePetSetsNamespacer { - return storePetSetsNamespacer{s.Store, namespace} +func (s *StoreToStatefulSetLister) StatefulSets(namespace string) storeStatefulSetsNamespacer { + return storeStatefulSetsNamespacer{s.Store, namespace} } -// GetPodPetSets returns a list of PetSets managing a pod. Returns an error only if no matching PetSets are found. -func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, err error) { +// GetPodStatefulSets returns a list of StatefulSets managing a pod. Returns an error only if no matching StatefulSets are found. +func (s *StoreToStatefulSetLister) GetPodStatefulSets(pod *api.Pod) (psList []apps.StatefulSet, err error) { var selector labels.Selector - var ps apps.PetSet + var ps apps.StatefulSet if len(pod.Labels) == 0 { - err = fmt.Errorf("no PetSets found for pod %v because it has no labels", pod.Name) + err = fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) return } for _, m := range s.Store.List() { - ps = *m.(*apps.PetSet) + ps = *m.(*apps.StatefulSet) if ps.Namespace != pod.Namespace { continue } @@ -416,14 +416,14 @@ func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, return } - // If a PetSet with a nil or empty selector creeps in, it should match nothing, not everything. + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } psList = append(psList, ps) } if len(psList) == 0 { - err = fmt.Errorf("could not find PetSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + err = fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return } diff --git a/pkg/client/unversioned/apps.go b/pkg/client/unversioned/apps.go new file mode 100644 index 00000000000..ee51a63cc48 --- /dev/null +++ b/pkg/client/unversioned/apps.go @@ -0,0 +1,55 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type AppsInterface interface { + StatefulSetNamespacer +} + +// AppsClient is used to interact with Kubernetes apps features. +type AppsClient struct { + *restclient.RESTClient +} + +func (c *AppsClient) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSet(c, namespace) +} + +func NewApps(c *restclient.Config) (*AppsClient, error) { + config := *c + if err := setGroupDefaults(apps.GroupName, &config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsClient{client}, nil +} + +func NewAppsOrDie(c *restclient.Config) *AppsClient { + client, err := NewApps(c) + if err != nil { + panic(err) + } + return client +} diff --git a/pkg/client/unversioned/conditions.go b/pkg/client/unversioned/conditions.go index 6bc87ca05c0..7eb4048148e 100644 --- a/pkg/client/unversioned/conditions.go +++ b/pkg/client/unversioned/conditions.go @@ -77,11 +77,11 @@ func ReplicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, r } } -// PetSetHasDesiredPets returns a conditon that checks the number of petset replicas -func PetSetHasDesiredPets(psClient appsclient.PetSetsGetter, petset *apps.PetSet) wait.ConditionFunc { +// StatefulSetHasDesiredPets returns a condition that checks the number of statefulset replicas +func StatefulSetHasDesiredPets(psClient appsclient.StatefulSetsGetter, petset *apps.StatefulSet) wait.ConditionFunc { // TODO: Differentiate between 0 pets and a really quick scale down using generation. return func() (bool, error) { - ps, err := psClient.PetSets(petset.Namespace).Get(petset.Name) + ps, err := psClient.StatefulSets(petset.Namespace).Get(petset.Name) if err != nil { return false, err } diff --git a/pkg/client/unversioned/pet_sets.go b/pkg/client/unversioned/pet_sets.go new file mode 100644 index 00000000000..5cd225625ed --- /dev/null +++ b/pkg/client/unversioned/pet_sets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/watch" +) + +// StatefulSetNamespacer has methods to work with StatefulSet resources in a namespace +type StatefulSetNamespacer interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface exposes methods to work on StatefulSet resources.
+type StatefulSetInterface interface { + List(opts api.ListOptions) (*apps.StatefulSetList, error) + Get(name string) (*apps.StatefulSet, error) + Create(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) + Update(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) +} + +// statefulSet implements StatefulSetNamespacer interface +type statefulSet struct { + r *AppsClient + ns string +} + +// newStatefulSet returns a statefulSet +func newStatefulSet(c *AppsClient, namespace string) *statefulSet { + return &statefulSet{c, namespace} +} + +// List returns a list of statefulSet that match the label and field selectors. +func (c *statefulSet) List(opts api.ListOptions) (result *apps.StatefulSetList, err error) { + result = &apps.StatefulSetList{} + err = c.r.Get().Namespace(c.ns).Resource("statefulsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular statefulSet. +func (c *statefulSet) Get(name string) (result *apps.StatefulSet, err error) { + result = &apps.StatefulSet{} + err = c.r.Get().Namespace(c.ns).Resource("statefulsets").Name(name).Do().Into(result) + return +} + +// Create creates a new statefulSet. +func (c *statefulSet) Create(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { + result = &apps.StatefulSet{} + err = c.r.Post().Namespace(c.ns).Resource("statefulsets").Body(statefulSet).Do().Into(result) + return +} + +// Update updates an existing statefulSet. +func (c *statefulSet) Update(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { + result = &apps.StatefulSet{} + err = c.r.Put().Namespace(c.ns).Resource("statefulsets").Name(statefulSet.Name).Body(statefulSet).Do().Into(result) + return +} + +// Delete deletes a statefulSet, returns error if one occurs. +func (c *statefulSet) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("statefulsets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested statefulSet. +func (c *statefulSet) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the statefulSet and the new status. Returns the server's representation of the statefulSet, and an error, if it occurs. +func (c *statefulSet) UpdateStatus(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { + result = &apps.StatefulSet{} + err = c.r.Put().Namespace(c.ns).Resource("statefulsets").Name(statefulSet.Name).SubResource("status").Body(statefulSet).Do().Into(result) + return +} diff --git a/pkg/client/unversioned/pet_sets_test.go b/pkg/client/unversioned/pet_sets_test.go new file mode 100644 index 00000000000..6a03f3a75ee --- /dev/null +++ b/pkg/client/unversioned/pet_sets_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getStatefulSetResourceName() string { + return "statefulsets" +} + +func TestListStatefulSets(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Apps.ResourcePath(getStatefulSetResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &apps.StatefulSetList{ + Items: []apps.StatefulSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.StatefulSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + } + receivedRSList, err := c.Setup(t).Apps().StatefulSets(ns).List(api.ListOptions{}) + c.Validate(t, receivedRSList, err) +} + +func TestGetStatefulSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Apps.ResourcePath(getStatefulSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.StatefulSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.StatefulSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().StatefulSets(ns).Get("foo") + c.Validate(t, receivedRS, err) +} + +func TestGetStatefulSetWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Apps().StatefulSets(ns).Get("") + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestUpdateStatefulSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &apps.StatefulSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Apps.ResourcePath(getStatefulSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.StatefulSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.StatefulSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().StatefulSets(ns).Update(requestRS) + c.Validate(t, receivedRS, err) +} + +func TestDeleteStatefulSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Apps.ResourcePath(getStatefulSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Apps().StatefulSets(ns).Delete("foo", nil) + c.Validate(t, nil, err) +} + +func 
TestCreateStatefulSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &apps.StatefulSet{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Apps.ResourcePath(getStatefulSetResourceName(), ns, ""), Body: requestRS, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.StatefulSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.StatefulSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().StatefulSets(ns).Create(requestRS) + c.Validate(t, receivedRS, err) +} + +// TODO: Test Status actions. diff --git a/pkg/client/unversioned/testclient/fake_petsets.go b/pkg/client/unversioned/testclient/fake_petsets.go new file mode 100644 index 00000000000..f11eaa84b53 --- /dev/null +++ b/pkg/client/unversioned/testclient/fake_petsets.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeStatefulSets implements StatefulSetsInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the method you want to test easier. 
+type FakeStatefulSets struct { + Fake *FakeApps + Namespace string +} + +func (c *FakeStatefulSets) Get(name string) (*apps.StatefulSet, error) { + obj, err := c.Fake.Invokes(NewGetAction("statefulsets", c.Namespace, name), &apps.StatefulSet{}) + if obj == nil { + return nil, err + } + + return obj.(*apps.StatefulSet), err +} + +func (c *FakeStatefulSets) List(opts api.ListOptions) (*apps.StatefulSetList, error) { + obj, err := c.Fake.Invokes(NewListAction("statefulsets", c.Namespace, opts), &apps.StatefulSetList{}) + if obj == nil { + return nil, err + } + return obj.(*apps.StatefulSetList), err +} + +func (c *FakeStatefulSets) Create(rs *apps.StatefulSet) (*apps.StatefulSet, error) { + obj, err := c.Fake.Invokes(NewCreateAction("statefulsets", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.StatefulSet), err +} + +func (c *FakeStatefulSets) Update(rs *apps.StatefulSet) (*apps.StatefulSet, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("statefulsets", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.StatefulSet), err +} + +func (c *FakeStatefulSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("statefulsets", c.Namespace, name), &apps.StatefulSet{}) + return err +} + +func (c *FakeStatefulSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("statefulsets", c.Namespace, opts)) +} + +func (c *FakeStatefulSets) UpdateStatus(rs *apps.StatefulSet) (result *apps.StatefulSet, err error) { + obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("statefulsets", "status", c.Namespace, rs), rs) + if obj == nil { + return nil, err + } + + return obj.(*apps.StatefulSet), err +} diff --git a/pkg/client/unversioned/testclient/testclient.go b/pkg/client/unversioned/testclient/testclient.go new file mode 100644 index 00000000000..32fd131e6f0 --- /dev/null +++ b/pkg/client/unversioned/testclient/testclient.go @@ -0,0 +1,533 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testclient + +import ( + "fmt" + "sync" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/typed/discovery" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleFake returns a client that will respond with the provided objects +func NewSimpleFake(objects ...runtime.Object) *Fake { + o := NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakeClient := &Fake{} + fakeClient.AddReactor("*", "*", ObjectReaction(o, registered.RESTMapper())) + + fakeClient.AddWatchReactor("*", DefaultWatchReactor(watch.NewFake(), nil)) + + return fakeClient +} + +// Fake implements client.Interface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the method you want to test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every request in the order they are tried + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted for every request in the order they are tried + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted for every request in the order they are tried + ProxyReactionChain []ProxyReactor + + Resources map[string]*unversioned.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given action + Handles(action Action) bool + // React handles the action and returns results. It may choose to delegate by indicated handled=false + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given action + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to delegate by indicated handled=false + React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given action + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to delegate by indicated handled=false + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given Action. If "handled" is false, +// then the test client will continue ignore the results and continue to the next ReactionFunc +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. 
If "handled" is false, +// then the test client will continue ignore the results and continue to the next ReactionFunc +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface for a given Action. If "handled" is false, +// then the test client will continue ignore the results and continue to the next ProxyReactionFunc +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactFn (if provided). +// defaultReturnObj is expected to be of the same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactFn (if provided). +func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactFn (if provided). 
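To show the chain ordering above in use, a hedged example of injecting a failure for one resource: a reactor prepended to the chain handles the action before the default object reactor, so the Get surfaces the injected error. The error text and object names are made up for illustration.

package testclient_test

import (
	"errors"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
	"k8s.io/kubernetes/pkg/runtime"
)

func TestStatefulSetGetFailureInjection(t *testing.T) {
	fake := testclient.NewSimpleFake()

	// Prepend so this reactor runs before the default object reactor.
	fake.PrependReactor("get", "statefulsets", func(action testclient.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("injected apiserver failure")
	})

	if _, err := fake.Apps().StatefulSets(api.NamespaceDefault).Get("web"); err == nil {
		t.Fatal("expected the injected error, got nil")
	}
}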
+func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client +func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the fake client +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} + +func (c *Fake) LimitRanges(namespace string) client.LimitRangeInterface { + return &FakeLimitRanges{Fake: c, Namespace: namespace} +} + +func (c *Fake) ResourceQuotas(namespace string) client.ResourceQuotaInterface { + return &FakeResourceQuotas{Fake: c, Namespace: namespace} +} + +func (c *Fake) ReplicationControllers(namespace string) client.ReplicationControllerInterface { + return &FakeReplicationControllers{Fake: c, Namespace: namespace} +} + +func (c *Fake) Nodes() client.NodeInterface { + return &FakeNodes{Fake: c} +} + +func (c *Fake) PodSecurityPolicies() client.PodSecurityPolicyInterface { + return &FakePodSecurityPolicy{Fake: c} +} + +func (c *Fake) Events(namespace string) client.EventInterface { + return &FakeEvents{Fake: c, Namespace: namespace} +} + +func (c *Fake) Endpoints(namespace string) client.EndpointsInterface { + return &FakeEndpoints{Fake: c, Namespace: namespace} +} + +func (c *Fake) PersistentVolumes() client.PersistentVolumeInterface { + return &FakePersistentVolumes{Fake: c} +} + +func (c *Fake) PersistentVolumeClaims(namespace string) client.PersistentVolumeClaimInterface { + return &FakePersistentVolumeClaims{Fake: c, Namespace: namespace} +} + +func (c *Fake) Pods(namespace string) client.PodInterface { + return &FakePods{Fake: c, Namespace: namespace} +} + +func (c *Fake) PodTemplates(namespace string) client.PodTemplateInterface { + return &FakePodTemplates{Fake: c, Namespace: namespace} +} + +func (c *Fake) Services(namespace string) client.ServiceInterface { + return &FakeServices{Fake: c, Namespace: namespace} +} + +func (c *Fake) ServiceAccounts(namespace string) client.ServiceAccountsInterface { + return &FakeServiceAccounts{Fake: c, Namespace: namespace} +} + +func (c *Fake) Secrets(namespace string) client.SecretsInterface { + return &FakeSecrets{Fake: c, Namespace: namespace} +} + +func (c *Fake) Namespaces() client.NamespaceInterface { + return &FakeNamespaces{Fake: c} +} + +func (c *Fake) Apps() client.AppsInterface { + return &FakeApps{c} +} + +func (c *Fake) Authorization() client.AuthorizationInterface { + return &FakeAuthorization{c} +} + +func (c *Fake) Autoscaling() client.AutoscalingInterface { + return &FakeAutoscaling{c} +} + +func (c *Fake) Batch() client.BatchInterface { + return &FakeBatch{c} +} + +func (c *Fake) Certificates() client.CertificatesInterface { + return &FakeCertificates{c} +} + +func (c *Fake) Extensions() client.ExtensionsInterface { + return &FakeExperimental{c} +} + +func (c *Fake) Discovery() discovery.DiscoveryInterface { + return &FakeDiscovery{c} +} + +func (c *Fake) ComponentStatuses() client.ComponentStatusInterface { + return &FakeComponentStatuses{Fake: c} +} + +func (c *Fake) ConfigMaps(namespace string) client.ConfigMapsInterface { + 
return &FakeConfigMaps{Fake: c, Namespace: namespace} +} + +func (c *Fake) Rbac() client.RbacInterface { + return &FakeRbac{Fake: c} +} + +func (c *Fake) Storage() client.StorageInterface { + return &FakeStorage{Fake: c} +} + +func (c *Fake) Authentication() client.AuthenticationInterface { + return &FakeAuthentication{Fake: c} +} + +// SwaggerSchema returns an empty swagger.ApiDeclaration for testing +func (c *Fake) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { + action := ActionImpl{} + action.Verb = "get" + if version == v1.SchemeGroupVersion { + action.Resource = "/swaggerapi/api/" + version.Version + } else { + action.Resource = "/swaggerapi/apis/" + version.Group + "/" + version.Version + } + + c.Invokes(action, nil) + return &swagger.ApiDeclaration{}, nil +} + +// NewSimpleFakeApps returns a client that will respond with the provided objects +func NewSimpleFakeApps(objects ...runtime.Object) *FakeApps { + return &FakeApps{Fake: NewSimpleFake(objects...)} +} + +type FakeApps struct { + *Fake +} + +func (c *FakeApps) StatefulSets(namespace string) client.StatefulSetInterface { + return &FakeStatefulSets{Fake: c, Namespace: namespace} +} + +// NewSimpleFakeAuthorization returns a client that will respond with the provided objects +func NewSimpleFakeAuthorization(objects ...runtime.Object) *FakeAuthorization { + return &FakeAuthorization{Fake: NewSimpleFake(objects...)} +} + +type FakeAuthorization struct { + *Fake +} + +func (c *FakeAuthorization) SubjectAccessReviews() client.SubjectAccessReviewInterface { + return &FakeSubjectAccessReviews{Fake: c} +} + +// NewSimpleFakeAutoscaling returns a client that will respond with the provided objects +func NewSimpleFakeAutoscaling(objects ...runtime.Object) *FakeAutoscaling { + return &FakeAutoscaling{Fake: NewSimpleFake(objects...)} +} + +type FakeAutoscaling struct { + *Fake +} + +func (c *FakeAutoscaling) HorizontalPodAutoscalers(namespace string) client.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{Fake: c, Namespace: namespace} +} + +func NewSimpleFakeAuthentication(objects ...runtime.Object) *FakeAuthentication { + return &FakeAuthentication{Fake: NewSimpleFake(objects...)} +} + +type FakeAuthentication struct { + *Fake +} + +func (c *FakeAuthentication) TokenReviews() client.TokenReviewInterface { + return &FakeTokenReviews{Fake: c} +} + +// NewSimpleFakeBatch returns a client that will respond with the provided objects +func NewSimpleFakeBatch(objects ...runtime.Object) *FakeBatch { + return &FakeBatch{Fake: NewSimpleFake(objects...)} +} + +type FakeBatch struct { + *Fake +} + +func (c *FakeBatch) Jobs(namespace string) client.JobInterface { + return &FakeJobsV1{Fake: c, Namespace: namespace} +} + +func (c *FakeBatch) ScheduledJobs(namespace string) client.ScheduledJobInterface { + return &FakeScheduledJobs{Fake: c, Namespace: namespace} +} + +// NewSimpleFakeExp returns a client that will respond with the provided objects +func NewSimpleFakeExp(objects ...runtime.Object) *FakeExperimental { + return &FakeExperimental{Fake: NewSimpleFake(objects...)} +} + +type FakeExperimental struct { + *Fake +} + +func (c *FakeExperimental) DaemonSets(namespace string) client.DaemonSetInterface { + return &FakeDaemonSets{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) Deployments(namespace string) client.DeploymentInterface { + return &FakeDeployments{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) Scales(namespace string) client.ScaleInterface { + 
return &FakeScales{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) Jobs(namespace string) client.JobInterface { + return &FakeJobs{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) Ingress(namespace string) client.IngressInterface { + return &FakeIngress{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) ThirdPartyResources() client.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{Fake: c} +} + +func (c *FakeExperimental) ReplicaSets(namespace string) client.ReplicaSetInterface { + return &FakeReplicaSets{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) NetworkPolicies(namespace string) client.NetworkPolicyInterface { + return &FakeNetworkPolicies{Fake: c, Namespace: namespace} +} + +func NewSimpleFakeRbac(objects ...runtime.Object) *FakeRbac { + return &FakeRbac{Fake: NewSimpleFake(objects...)} +} + +type FakeRbac struct { + *Fake +} + +func (c *FakeRbac) Roles(namespace string) client.RoleInterface { + return &FakeRoles{Fake: c, Namespace: namespace} +} + +func (c *FakeRbac) RoleBindings(namespace string) client.RoleBindingInterface { + return &FakeRoleBindings{Fake: c, Namespace: namespace} +} + +func (c *FakeRbac) ClusterRoles() client.ClusterRoleInterface { + return &FakeClusterRoles{Fake: c} +} + +func (c *FakeRbac) ClusterRoleBindings() client.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{Fake: c} +} + +func NewSimpleFakeStorage(objects ...runtime.Object) *FakeStorage { + return &FakeStorage{Fake: NewSimpleFake(objects...)} +} + +type FakeStorage struct { + *Fake +} + +func (c *FakeStorage) StorageClasses() client.StorageClassInterface { + return &FakeStorageClasses{Fake: c} +} + +type FakeDiscovery struct { + *Fake +} + +func (c *FakeDiscovery) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { + action := ActionImpl{ + Verb: "get", + Resource: "resource", + } + c.Invokes(action, nil) + return c.Resources[groupVersion], nil +} + +func (c *FakeDiscovery) ServerResources() (map[string]*unversioned.APIResourceList, error) { + action := ActionImpl{ + Verb: "get", + Resource: "resource", + } + c.Invokes(action, nil) + return c.Resources, nil +} + +func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := ActionImpl{} + action.Verb = "get" + action.Resource = "version" + + c.Invokes(action, nil) + versionInfo := version.Get() + return &versionInfo, nil +} + +func (c *FakeDiscovery) RESTClient() restclient.Interface { + return nil +} diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index bb228105461..91aad17b2b9 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -57,9 +57,9 @@ const ( // An annotation on the Service denoting if the endpoints controller should // go ahead and create endpoints for unready pods. This annotation is - // currently only used by PetSets, where we need the pet to be DNS + // currently only used by StatefulSets, where we need the pod to be DNS // resolvable during initialization. 
In this situation we create a headless - // service just for the PetSet, and clients shouldn't be using this Service + // service just for the StatefulSet, and clients shouldn't be using this Service // for anything so unready endpoints don't matter. TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints" ) diff --git a/pkg/controller/petset/fakes.go b/pkg/controller/petset/fakes.go index 9ef0ba2657d..bb403174e1e 100644 --- a/pkg/controller/petset/fakes.go +++ b/pkg/controller/petset/fakes.go @@ -51,7 +51,7 @@ func newPVC(name string) api.PersistentVolumeClaim { } } -func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.PetSet { +func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.StatefulSet { mounts := append(petMounts, podMounts...) claims := []api.PersistentVolumeClaim{} for _, m := range petMounts { @@ -70,9 +70,9 @@ func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount }) } - return &apps.PetSet{ + return &apps.StatefulSet{ TypeMeta: unversioned.TypeMeta{ - Kind: "PetSet", + Kind: "StatefulSet", APIVersion: "apps/v1beta1", }, ObjectMeta: api.ObjectMeta{ @@ -80,7 +80,7 @@ func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount Namespace: api.NamespaceDefault, UID: types.UID("test"), }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{ MatchLabels: map[string]string{"foo": "bar"}, }, @@ -110,7 +110,7 @@ func runningPod(ns, name string) *api.Pod { return p } -func newPodList(ps *apps.PetSet, num int) []*api.Pod { +func newPodList(ps *apps.StatefulSet, num int) []*api.Pod { // knownPods are pods in the system knownPods := []*api.Pod{} for i := 0; i < num; i++ { @@ -120,14 +120,14 @@ func newPodList(ps *apps.PetSet, num int) []*api.Pod { return knownPods } -func newPetSet(replicas int) *apps.PetSet { +func newStatefulSet(replicas int) *apps.StatefulSet { petMounts := []api.VolumeMount{ {Name: "datadir", MountPath: "/tmp/zookeeper"}, } podMounts := []api.VolumeMount{ {Name: "home", MountPath: "/home"}, } - return newPetSetWithVolumes(replicas, "foo", petMounts, podMounts) + return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts) } func checkPodForMount(pod *api.Pod, mountName string) error { @@ -168,14 +168,14 @@ func (f *fakePetClient) Delete(p *pcb) error { for i, pet := range f.pets { if p.pod.Name == pet.pod.Name { found = true - f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pet: %v", pet.pod.Name) + f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name) continue } pets = append(pets, f.pets[i]) } if !found { // TODO: Return proper not found error - return fmt.Errorf("Delete failed: pet %v doesn't exist", p.pod.Name) + return fmt.Errorf("Delete failed: pod %v doesn't exist", p.pod.Name) } f.pets = pets f.petsDeleted++ @@ -196,10 +196,10 @@ func (f *fakePetClient) Get(p *pcb) (*pcb, bool, error) { func (f *fakePetClient) Create(p *pcb) error { for _, pet := range f.pets { if p.pod.Name == pet.pod.Name { - return fmt.Errorf("Create failed: pet %v already exists", p.pod.Name) + return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name) } } - f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pet: %v", p.pod.Name) + f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name) f.pets = 
append(f.pets, p) f.petsCreated++ return nil @@ -220,7 +220,7 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error { } f.pets = pets if !found { - return fmt.Errorf("Cannot update pet %v not found", wanted.pod.Name) + return fmt.Errorf("Cannot update pod %v not found", wanted.pod.Name) } // TODO: Delete pvcs/volumes that are in wanted but not in expected. return nil @@ -252,7 +252,7 @@ func (f *fakePetClient) setHealthy(index int) error { return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index) } f.pets[index].pod.Status.Phase = api.PodRunning - f.pets[index].pod.Annotations[PetSetInitAnnotation] = "true" + f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true" f.pets[index].pod.Status.Conditions = []api.PodCondition{ {Type: api.PodReady, Status: api.ConditionTrue}, } diff --git a/pkg/controller/petset/identity_mappers.go b/pkg/controller/petset/identity_mappers.go index 985d289f0c5..5aedded48d0 100644 --- a/pkg/controller/petset/identity_mappers.go +++ b/pkg/controller/petset/identity_mappers.go @@ -30,7 +30,7 @@ import ( ) // identityMapper is an interface for assigning identities to a pet. -// All existing identity mappers just append "-(index)" to the petset name to +// All existing identity mappers just append "-(index)" to the statefulset name to // generate a unique identity. This is used in claims/DNS/hostname/petname // etc. There's a more elegant way to achieve this mapping, but we're // taking the simplest route till we have data on whether users will need @@ -39,15 +39,15 @@ import ( // your pet a unique identity. You must run them all. Order doesn't matter. type identityMapper interface { // SetIdentity takes an id and assigns the given pet an identity based - // on the pet set spec. The is must be unique amongst members of the - // pet set. + // on the stateful set spec. The is must be unique amongst members of the + // stateful set. SetIdentity(id string, pet *api.Pod) // Identity returns the identity of the pet. Identity(pod *api.Pod) string } -func newIdentityMappers(ps *apps.PetSet) []identityMapper { +func newIdentityMappers(ps *apps.StatefulSet) []identityMapper { return []identityMapper{ &NameIdentityMapper{ps}, &NetworkIdentityMapper{ps}, @@ -57,7 +57,7 @@ func newIdentityMappers(ps *apps.PetSet) []identityMapper { // NetworkIdentityMapper assigns network identity to pets. type NetworkIdentityMapper struct { - ps *apps.PetSet + ps *apps.StatefulSet } // SetIdentity sets network identity on the pet. @@ -81,7 +81,7 @@ func (n *NetworkIdentityMapper) String(pet *api.Pod) string { // VolumeIdentityMapper assigns storage identity to pets. type VolumeIdentityMapper struct { - ps *apps.PetSet + ps *apps.StatefulSet } // SetIdentity sets storge identity on the pet. @@ -90,16 +90,16 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) { petClaims := v.GetClaims(id) // These volumes will all go down with the pod. If a name matches one of - // the claims in the pet set, it gets clobbered. + // the claims in the stateful set, it gets clobbered. podVolumes := map[string]api.Volume{} for _, podVol := range pet.Spec.Volumes { podVolumes[podVol.Name] = podVol } - // Insert claims for the idempotent petSet volumes + // Insert claims for the idempotent statefulset volumes for name, claim := range petClaims { // Volumes on a pet for which there are no associated claims on the - // petset are pod local, and die with the pod. + // statefulset are pod local, and die with the pod. 
podVol, ok := podVolumes[name] if ok { // TODO: Validate and reject this. @@ -143,7 +143,7 @@ func (v *VolumeIdentityMapper) String(pet *api.Pod) string { } for _, podVol := range pet.Spec.Volumes { // Volumes on a pet for which there are no associated claims on the - // petset are pod local, and die with the pod. + // statefulset are pod local, and die with the pod. if !petVols.Has(podVol.Name) { continue } @@ -159,7 +159,7 @@ func (v *VolumeIdentityMapper) String(pet *api.Pod) string { } // GetClaims returns the volume claims associated with the given id. -// The claims belong to the petset. The id should be unique within a petset. +// The claims belong to the statefulset. The id should be unique within a statefulset. func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVolumeClaim { petClaims := map[string]api.PersistentVolumeClaim{} for _, pvc := range v.ps.Spec.VolumeClaimTemplates { @@ -192,7 +192,7 @@ func (v *VolumeIdentityMapper) GetClaimsForPet(pet *api.Pod) []api.PersistentVol // NameIdentityMapper assigns names to pets. // It also puts the pet in the same namespace as the parent. type NameIdentityMapper struct { - ps *apps.PetSet + ps *apps.StatefulSet } // SetIdentity sets the pet namespace and name. @@ -214,7 +214,7 @@ func (n *NameIdentityMapper) String(pet *api.Pod) string { // identityHash computes a hash of the pet by running all the above identity // mappers. -func identityHash(ps *apps.PetSet, pet *api.Pod) string { +func identityHash(ps *apps.StatefulSet, pet *api.Pod) string { id := "" for _, idMapper := range newIdentityMappers(ps) { id += idMapper.Identity(pet) diff --git a/pkg/controller/petset/identity_mappers_test.go b/pkg/controller/petset/identity_mappers_test.go index f19eaf05f40..1ee97df8c3b 100644 --- a/pkg/controller/petset/identity_mappers_test.go +++ b/pkg/controller/petset/identity_mappers_test.go @@ -29,7 +29,7 @@ import ( func TestPetIDName(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) for i := 0; i < replicas; i++ { petName := fmt.Sprintf("%v-%d", ps.Name, i) pcb, err := newPCB(fmt.Sprintf("%d", i), ps) @@ -45,7 +45,7 @@ func TestPetIDName(t *testing.T) { func TestPetIDDNS(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) for i := 0; i < replicas; i++ { petName := fmt.Sprintf("%v-%d", ps.Name, i) petSubdomain := ps.Spec.ServiceName @@ -65,7 +65,7 @@ func TestPetIDDNS(t *testing.T) { } func TestPetIDVolume(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) for i := 0; i < replicas; i++ { pcb, err := newPCB(fmt.Sprintf("%d", i), ps) if err != nil { @@ -99,7 +99,7 @@ func TestPetIDVolume(t *testing.T) { func TestPetIDVolumeClaims(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) for i := 0; i < replicas; i++ { pcb, err := newPCB(fmt.Sprintf("%v", i), ps) if err != nil { @@ -116,7 +116,7 @@ func TestPetIDVolumeClaims(t *testing.T) { func TestPetIDCrossAssignment(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) nameMapper := &NameIdentityMapper{ps} volumeMapper := &VolumeIdentityMapper{ps} @@ -144,7 +144,7 @@ func TestPetIDCrossAssignment(t *testing.T) { func TestPetIDReset(t *testing.T) { replicas := 2 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) firstPCB, err := newPCB("1", ps) secondPCB, err := newPCB("2", ps) if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) { diff --git 
a/pkg/controller/petset/iterator.go b/pkg/controller/petset/iterator.go index d86c8eaac63..77c37a05c0d 100644 --- a/pkg/controller/petset/iterator.go +++ b/pkg/controller/petset/iterator.go @@ -27,7 +27,7 @@ import ( ) // newPCB generates a new PCB using the id string as a unique qualifier -func newPCB(id string, ps *apps.PetSet) (*pcb, error) { +func newPCB(id string, ps *apps.StatefulSet) (*pcb, error) { petPod, err := controller.GetPodFromTemplate(&ps.Spec.Template, ps, nil) if err != nil { return nil, err @@ -87,7 +87,7 @@ func (pt *petQueue) empty() bool { } // NewPetQueue returns a queue for tracking pets -func NewPetQueue(ps *apps.PetSet, podList []*api.Pod) *petQueue { +func NewPetQueue(ps *apps.StatefulSet, podList []*api.Pod) *petQueue { pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}} // Seed the queue with existing pets. Assume all pets are scheduled for // deletion, enqueuing a pet will "undelete" it. We always want to delete @@ -102,10 +102,10 @@ func NewPetQueue(ps *apps.PetSet, podList []*api.Pod) *petQueue { return &pt } -// petsetIterator implements a simple iterator over pets in the given petset. -type petSetIterator struct { - // ps is the petset for this iterator. - ps *apps.PetSet +// statefulsetIterator implements a simple iterator over pets in the given statefulset. +type statefulSetIterator struct { + // ps is the statefulset for this iterator. + ps *apps.StatefulSet // queue contains the elements to iterate over. queue *petQueue // errs is a list because we always want the iterator to drain. @@ -115,7 +115,7 @@ type petSetIterator struct { } // Next returns true for as long as there are elements in the underlying queue. -func (pi *petSetIterator) Next() bool { +func (pi *statefulSetIterator) Next() bool { var pet *pcb var err error if pi.petCount < pi.ps.Spec.Replicas { @@ -133,14 +133,14 @@ func (pi *petSetIterator) Next() bool { } // Value dequeues an element from the queue. -func (pi *petSetIterator) Value() *pcb { +func (pi *statefulSetIterator) Value() *pcb { return pi.queue.dequeue() } -// NewPetSetIterator returns a new iterator. All pods in the given podList +// NewStatefulSetIterator returns a new iterator. All pods in the given podList // are used to seed the queue of the iterator. 
-func NewPetSetIterator(ps *apps.PetSet, podList []*api.Pod) *petSetIterator { - pi := &petSetIterator{ +func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulSetIterator { + pi := &statefulSetIterator{ ps: ps, queue: NewPetQueue(ps, podList), errs: []error{}, diff --git a/pkg/controller/petset/iterator_test.go b/pkg/controller/petset/iterator_test.go index b65d10a2561..adbc52f0749 100644 --- a/pkg/controller/petset/iterator_test.go +++ b/pkg/controller/petset/iterator_test.go @@ -27,7 +27,7 @@ import ( func TestPetQueueCreates(t *testing.T) { replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) q := NewPetQueue(ps, []*api.Pod{}) for i := 0; i < replicas; i++ { pet, _ := newPCB(fmt.Sprintf("%v", i), ps) @@ -38,13 +38,13 @@ func TestPetQueueCreates(t *testing.T) { } } if q.dequeue() != nil { - t.Errorf("Expected no pets") + t.Errorf("Expected no pods") } } func TestPetQueueScaleDown(t *testing.T) { replicas := 1 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) // knownPods are the pods in the system knownPods := newPodList(ps, 3) @@ -74,13 +74,13 @@ func TestPetQueueScaleDown(t *testing.T) { } } if q.dequeue() != nil { - t.Errorf("Expected no pets") + t.Errorf("Expected no pods") } } func TestPetQueueScaleUp(t *testing.T) { replicas := 5 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) // knownPods are pods in the system knownPods := newPodList(ps, 2) @@ -94,14 +94,14 @@ func TestPetQueueScaleUp(t *testing.T) { pet := q.dequeue() expectedName := fmt.Sprintf("%v-%d", ps.Name, i) if pet.event != syncPet || pet.pod.Name != expectedName { - t.Errorf("Unexpected pet %+v, expected %v", pet.pod.Name, expectedName) + t.Errorf("Unexpected pod %+v, expected %v", pet.pod.Name, expectedName) } } } -func TestPetSetIteratorRelist(t *testing.T) { +func TestStatefulSetIteratorRelist(t *testing.T) { replicas := 5 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) // knownPods are pods in the system knownPods := newPodList(ps, 5) @@ -109,7 +109,7 @@ func TestPetSetIteratorRelist(t *testing.T) { knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i) knownPods[i].Status.Phase = api.PodRunning } - pi := NewPetSetIterator(ps, knownPods) + pi := NewStatefulSetIterator(ps, knownPods) // A simple resync should not change identity of pods in the system i := 0 @@ -124,12 +124,12 @@ func TestPetSetIteratorRelist(t *testing.T) { i++ } if i != 5 { - t.Errorf("Unexpected iterations %v, this probably means too many/few pets", i) + t.Errorf("Unexpected iterations %v, this probably means too many/few pods", i) } // Scale to 0 should delete all pods in system ps.Spec.Replicas = 0 - pi = NewPetSetIterator(ps, knownPods) + pi = NewStatefulSetIterator(ps, knownPods) i = 0 for pi.Next() { p := pi.Value() @@ -139,11 +139,11 @@ func TestPetSetIteratorRelist(t *testing.T) { i++ } if i != 5 { - t.Errorf("Unexpected iterations %v, this probably means too many/few pets", i) + t.Errorf("Unexpected iterations %v, this probably means too many/few pods", i) } // Relist with 0 replicas should no-op - pi = NewPetSetIterator(ps, []*api.Pod{}) + pi = NewStatefulSetIterator(ps, []*api.Pod{}) if pi.Next() != false { t.Errorf("Unexpected iteration without any replicas or pods in system") } diff --git a/pkg/controller/petset/pet.go b/pkg/controller/petset/pet.go index 794146c2ad1..71f64905e53 100644 --- a/pkg/controller/petset/pet.go +++ b/pkg/controller/petset/pet.go @@ -41,10 +41,10 @@ const ( // updateRetries is the number of Get/Update cycles we perform when 
an // update fails. updateRetries = 3 - // PetSetInitAnnotation is an annotation which when set, indicates that the + // StatefulSetInitAnnotation is an annotation which when set, indicates that the // pet has finished initializing itself. // TODO: Replace this with init container status. - PetSetInitAnnotation = "pod.alpha.kubernetes.io/initialized" + StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized" ) // pcb is the control block used to transmit all updates about a single pet. @@ -59,8 +59,8 @@ type pcb struct { event petLifeCycleEvent // id is the identity index of this pet. id string - // parent is a pointer to the parent petset. - parent *apps.PetSet + // parent is a pointer to the parent statefulset. + parent *apps.StatefulSet } // pvcClient is a client for managing persistent volume claims. @@ -113,12 +113,12 @@ func (p *petSyncer) Sync(pet *pcb) error { } } else if exists { if !p.isHealthy(realPet.pod) { - glog.Infof("PetSet %v waiting on unhealthy pet %v", pet.parent.Name, realPet.pod.Name) + glog.Infof("StatefulSet %v waiting on unhealthy pet %v", pet.parent.Name, realPet.pod.Name) } return p.Update(realPet, pet) } if p.blockingPet != nil { - message := errUnhealthyPet(fmt.Sprintf("Create of %v in PetSet %v blocked by unhealthy pet %v", pet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name)) + message := errUnhealthyPet(fmt.Sprintf("Create of %v in StatefulSet %v blocked by unhealthy pet %v", pet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name)) glog.Info(message) return message } @@ -135,7 +135,7 @@ func (p *petSyncer) Sync(pet *pcb) error { return nil } -// Delete deletes the given pet, if no other pet in the petset is blocking a +// Delete deletes the given pet, if no other pet in the statefulset is blocking a // scale event. func (p *petSyncer) Delete(pet *pcb) error { if pet == nil { @@ -149,17 +149,17 @@ func (p *petSyncer) Delete(pet *pcb) error { return nil } if p.blockingPet != nil { - glog.Infof("Delete of %v in PetSet %v blocked by unhealthy pet %v", realPet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name) + glog.Infof("Delete of %v in StatefulSet %v blocked by unhealthy pet %v", realPet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name) return nil } // This is counted as a delete, even if it fails. // The returned error will force a requeue. p.blockingPet = realPet if !p.isDying(realPet.pod) { - glog.Infof("PetSet %v deleting pet %v", pet.parent.Name, pet.pod.Name) + glog.Infof("StatefulSet %v deleting pet %v", pet.parent.Name, pet.pod.Name) return p.petClient.Delete(pet) } - glog.Infof("PetSet %v waiting on pet %v to die in %v", pet.parent.Name, realPet.pod.Name, realPet.pod.DeletionTimestamp) + glog.Infof("StatefulSet %v waiting on pet %v to die in %v", pet.parent.Name, realPet.pod.Name, realPet.pod.DeletionTimestamp) return nil } @@ -173,7 +173,7 @@ type petClient interface { Update(*pcb, *pcb) error } -// apiServerPetClient is a petset aware Kubernetes client. +// apiServerPetClient is a statefulset aware Kubernetes client. 
type apiServerPetClient struct { c internalclientset.Interface recorder record.EventRecorder @@ -223,7 +223,7 @@ func (p *apiServerPetClient) Update(pet *pcb, expectedPet *pcb) (updateErr error if err != nil || !needsUpdate { return err } - glog.Infof("Resetting pet %v/%v to match PetSet %v spec", pet.pod.Namespace, pet.pod.Name, pet.parent.Name) + glog.Infof("Resetting pet %v/%v to match StatefulSet %v spec", pet.pod.Namespace, pet.pod.Name, pet.parent.Name) _, updateErr = pc.Update(&updatePod) if updateErr == nil || i >= updateRetries { return updateErr @@ -303,9 +303,9 @@ func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool { if pod == nil || pod.Status.Phase != api.PodRunning { return false } - initialized, ok := pod.Annotations[PetSetInitAnnotation] + initialized, ok := pod.Annotations[StatefulSetInitAnnotation] if !ok { - glog.Infof("PetSet pod %v in %v, waiting on annotation %v", api.PodRunning, pod.Name, PetSetInitAnnotation) + glog.Infof("StatefulSet pod %v in %v, waiting on annotation %v", api.PodRunning, pod.Name, StatefulSetInitAnnotation) return false } b, err := strconv.ParseBool(initialized) diff --git a/pkg/controller/petset/pet_set.go b/pkg/controller/petset/pet_set.go index cee9afed8ac..c338ce694e7 100644 --- a/pkg/controller/petset/pet_set.go +++ b/pkg/controller/petset/pet_set.go @@ -46,12 +46,12 @@ const ( PodStoreSyncedPollPeriod = 100 * time.Millisecond // number of retries for a status update. statusUpdateRetries = 2 - // period to relist petsets and verify pets - petSetResyncPeriod = 30 * time.Second + // period to relist statefulsets and verify pets + statefulSetResyncPeriod = 30 * time.Second ) -// PetSetController controls petsets. -type PetSetController struct { +// StatefulSetController controls statefulsets. +type StatefulSetController struct { kubeClient internalclientset.Interface // newSyncer returns an interface capable of syncing a single pet. @@ -66,9 +66,9 @@ type PetSetController struct { // Watches changes to all pods. podController cache.ControllerInterface - // A store of PetSets, populated by the psController. - psStore cache.StoreToPetSetLister - // Watches changes to all PetSets. + // A store of StatefulSets, populated by the psController. + psStore cache.StoreToStatefulSetLister + // Watches changes to all StatefulSets. psController *cache.Controller // A store of the 1 unhealthy pet blocking progress for a given ps @@ -77,34 +77,34 @@ type PetSetController struct { // Controllers that need to be synced. queue workqueue.RateLimitingInterface - // syncHandler handles sync events for petsets. + // syncHandler handles sync events for statefulsets. // Abstracted as a func to allow injection for testing. syncHandler func(psKey string) error } -// NewPetSetController creates a new petset controller. -func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *PetSetController { +// NewStatefulSetController creates a new statefulset controller. 
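For orientation only, a condensed restatement of the init gate applied by isHealthy above, written as if inside the petset package (the helper name is invented, imports of api and strconv are elided, and the real checker may also require the pod's Ready condition): a pod counts as a healthy, initialized pet only once it is Running and carries the init annotation parsed as true.

// Illustrative helper, not part of the patch.
func podPassesInitGate(pod *api.Pod) bool {
	if pod == nil || pod.Status.Phase != api.PodRunning {
		return false
	}
	// StatefulSetInitAnnotation == "pod.alpha.kubernetes.io/initialized"
	value, ok := pod.Annotations[StatefulSetInitAnnotation]
	if !ok {
		return false
	}
	initialized, err := strconv.ParseBool(value)
	return err == nil && initialized
}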
+func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *StatefulSetController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) - recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"}) + recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "statefulset"}) pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}} - psc := &PetSetController{ + psc := &StatefulSetController{ kubeClient: kubeClient, blockingPetStore: newUnHealthyPetTracker(pc), newSyncer: func(blockingPet *pcb) *petSyncer { return &petSyncer{pc, blockingPet} }, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"), } podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - // lookup the petset and enqueue + // lookup the statefulset and enqueue AddFunc: psc.addPod, - // lookup current and old petset if labels changed + // lookup current and old statefulset if labels changed UpdateFunc: psc.updatePod, - // lookup petset accounting for deletion tombstones + // lookup statefulset accounting for deletion tombstones DeleteFunc: psc.deletePod, }) psc.podStore.Indexer = podInformer.GetIndexer() @@ -113,25 +113,25 @@ func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient inter psc.psStore.Store, psc.psController = cache.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options) + return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options) + return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).Watch(options) }, }, - &apps.PetSet{}, - petSetResyncPeriod, + &apps.StatefulSet{}, + statefulSetResyncPeriod, cache.ResourceEventHandlerFuncs{ - AddFunc: psc.enqueuePetSet, + AddFunc: psc.enqueueStatefulSet, UpdateFunc: func(old, cur interface{}) { - oldPS := old.(*apps.PetSet) - curPS := cur.(*apps.PetSet) + oldPS := old.(*apps.StatefulSet) + curPS := cur.(*apps.StatefulSet) if oldPS.Status.Replicas != curPS.Status.Replicas { - glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas) + glog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas) } - psc.enqueuePetSet(cur) + psc.enqueueStatefulSet(cur) }, - DeleteFunc: psc.enqueuePetSet, + DeleteFunc: psc.enqueueStatefulSet, }, ) // TODO: Watch volumes @@ -140,34 +140,34 @@ func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient inter return psc } -// Run runs the petset controller. -func (psc *PetSetController) Run(workers int, stopCh <-chan struct{}) { +// Run runs the statefulset controller. 
+func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting petset controller") + glog.Infof("Starting statefulset controller") go psc.podController.Run(stopCh) go psc.psController.Run(stopCh) for i := 0; i < workers; i++ { go wait.Until(psc.worker, time.Second, stopCh) } <-stopCh - glog.Infof("Shutting down petset controller") + glog.Infof("Shutting down statefulset controller") psc.queue.ShutDown() } -// addPod adds the petset for the pod to the sync queue -func (psc *PetSetController) addPod(obj interface{}) { +// addPod adds the statefulset for the pod to the sync queue +func (psc *StatefulSetController) addPod(obj interface{}) { pod := obj.(*api.Pod) glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels) - ps := psc.getPetSetForPod(pod) + ps := psc.getStatefulSetForPod(pod) if ps == nil { return } - psc.enqueuePetSet(ps) + psc.enqueueStatefulSet(ps) } -// updatePod adds the petset for the current and old pods to the sync queue. -// If the labels of the pod didn't change, this method enqueues a single petset. -func (psc *PetSetController) updatePod(old, cur interface{}) { +// updatePod adds the statefulset for the current and old pods to the sync queue. +// If the labels of the pod didn't change, this method enqueues a single statefulset. +func (psc *StatefulSetController) updatePod(old, cur interface{}) { curPod := cur.(*api.Pod) oldPod := old.(*api.Pod) if curPod.ResourceVersion == oldPod.ResourceVersion { @@ -175,26 +175,26 @@ func (psc *PetSetController) updatePod(old, cur interface{}) { // Two different versions of the same pod will always have different RVs. return } - ps := psc.getPetSetForPod(curPod) + ps := psc.getStatefulSetForPod(curPod) if ps == nil { return } - psc.enqueuePetSet(ps) + psc.enqueueStatefulSet(ps) if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { - if oldPS := psc.getPetSetForPod(oldPod); oldPS != nil { - psc.enqueuePetSet(oldPS) + if oldPS := psc.getStatefulSetForPod(oldPod); oldPS != nil { + psc.enqueueStatefulSet(oldPS) } } } -// deletePod enqueues the petset for the pod accounting for deletion tombstones. -func (psc *PetSetController) deletePod(obj interface{}) { +// deletePod enqueues the statefulset for the pod accounting for deletion tombstones. +func (psc *StatefulSetController) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod - // changed labels the new PetSet will not be woken up till the periodic resync. + // changed labels the new StatefulSet will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { @@ -208,14 +208,14 @@ func (psc *PetSetController) deletePod(obj interface{}) { } } glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller()) - if ps := psc.getPetSetForPod(pod); ps != nil { - psc.enqueuePetSet(ps) + if ps := psc.getStatefulSetForPod(pod); ps != nil { + psc.enqueueStatefulSet(ps) } } -// getPodsForPetSets returns the pods that match the selectors of the given petset. -func (psc *PetSetController) getPodsForPetSet(ps *apps.PetSet) ([]*api.Pod, error) { - // TODO: Do we want the petset to fight with RCs? check parent petset annoation, or name prefix? 
+// getPodsForStatefulSets returns the pods that match the selectors of the given statefulset. +func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*api.Pod, error) { + // TODO: Do we want the statefulset to fight with RCs? check parent statefulset annoation, or name prefix? sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) if err != nil { return []*api.Pod{}, err @@ -232,24 +232,24 @@ func (psc *PetSetController) getPodsForPetSet(ps *apps.PetSet) ([]*api.Pod, erro return result, nil } -// getPetSetForPod returns the pet set managing the given pod. -func (psc *PetSetController) getPetSetForPod(pod *api.Pod) *apps.PetSet { - ps, err := psc.psStore.GetPodPetSets(pod) +// getStatefulSetForPod returns the pet set managing the given pod. +func (psc *StatefulSetController) getStatefulSetForPod(pod *api.Pod) *apps.StatefulSet { + ps, err := psc.psStore.GetPodStatefulSets(pod) if err != nil { - glog.V(4).Infof("No PetSets found for pod %v, PetSet controller will avoid syncing", pod.Name) + glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name) return nil } - // Resolve a overlapping petset tie by creation timestamp. - // Let's hope users don't create overlapping petsets. + // Resolve a overlapping statefulset tie by creation timestamp. + // Let's hope users don't create overlapping statefulsets. if len(ps) > 1 { - glog.Errorf("user error! more than one PetSet is selecting pods with labels: %+v", pod.Labels) - sort.Sort(overlappingPetSets(ps)) + glog.Errorf("user error! more than one StatefulSet is selecting pods with labels: %+v", pod.Labels) + sort.Sort(overlappingStatefulSets(ps)) } return &ps[0] } -// enqueuePetSet enqueues the given petset in the work queue. -func (psc *PetSetController) enqueuePetSet(obj interface{}) { +// enqueueStatefulSet enqueues the given statefulset in the work queue. +func (psc *StatefulSetController) enqueueStatefulSet(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Cound't get key for object %+v: %v", obj, err) @@ -260,7 +260,7 @@ func (psc *PetSetController) enqueuePetSet(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. -func (psc *PetSetController) worker() { +func (psc *StatefulSetController) worker() { for { func() { key, quit := psc.queue.Get() @@ -269,7 +269,7 @@ func (psc *PetSetController) worker() { } defer psc.queue.Done(key) if err := psc.syncHandler(key.(string)); err != nil { - glog.Errorf("Error syncing PetSet %v, requeuing: %v", key.(string), err) + glog.Errorf("Error syncing StatefulSet %v, requeuing: %v", key.(string), err) psc.queue.AddRateLimited(key) } else { psc.queue.Forget(key) @@ -278,11 +278,11 @@ func (psc *PetSetController) worker() { } } -// Sync syncs the given petset. -func (psc *PetSetController) Sync(key string) error { +// Sync syncs the given statefulset. 
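The tie-break in getStatefulSetForPod above is worth spelling out; a sketch under the assumption that overlappingStatefulSets orders oldest-first by creation timestamp, as the comment states (newStatefulSet is the fixture from fakes.go above; imports of sort, time, apps, and unversioned are elided):

older := newStatefulSet(1)
older.Name = "web-old"
older.CreationTimestamp = unversioned.NewTime(time.Now().Add(-time.Hour))

newer := newStatefulSet(1)
newer.Name = "web-new"
newer.CreationTimestamp = unversioned.NewTime(time.Now())

sets := []apps.StatefulSet{*newer, *older}
sort.Sort(overlappingStatefulSets(sets))
// sets[0] is now "web-old": when two StatefulSets select the same pod, the
// oldest one is synced and an error is logged for the operator to resolve.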
+func (psc *StatefulSetController) Sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing pet set %q (%v)", key, time.Now().Sub(startTime)) + glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime)) }() if !psc.podStoreSynced() { @@ -296,40 +296,40 @@ func (psc *PetSetController) Sync(key string) error { if err = psc.blockingPetStore.store.Delete(key); err != nil { return err } - glog.Infof("PetSet has been deleted %v", key) + glog.Infof("StatefulSet has been deleted %v", key) return nil } if err != nil { - glog.Errorf("Unable to retrieve PetSet %v from store: %v", key, err) + glog.Errorf("Unable to retrieve StatefulSet %v from store: %v", key, err) return err } - ps := *obj.(*apps.PetSet) - petList, err := psc.getPodsForPetSet(&ps) + ps := *obj.(*apps.StatefulSet) + petList, err := psc.getPodsForStatefulSet(&ps) if err != nil { return err } - numPets, syncErr := psc.syncPetSet(&ps, petList) + numPets, syncErr := psc.syncStatefulSet(&ps, petList) if updateErr := updatePetCount(psc.kubeClient.Apps(), ps, numPets); updateErr != nil { - glog.Infof("Failed to update replica count for petset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, updateErr) + glog.Infof("Failed to update replica count for statefulset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, updateErr) return errors.NewAggregate([]error{syncErr, updateErr}) } return syncErr } -// syncPetSet syncs a tuple of (petset, pets). -func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, error) { - glog.Infof("Syncing PetSet %v/%v with %d pets", ps.Namespace, ps.Name, len(pets)) +// syncStatefulSet syncs a tuple of (statefulset, pets). +func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*api.Pod) (int, error) { + glog.Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets)) - it := NewPetSetIterator(ps, pets) + it := NewStatefulSetIterator(ps, pets) blockingPet, err := psc.blockingPetStore.Get(ps, pets) if err != nil { return 0, err } if blockingPet != nil { - glog.Infof("PetSet %v blocked from scaling on pet %v", ps.Name, blockingPet.pod.Name) + glog.Infof("StatefulSet %v blocked from scaling on pod %v", ps.Name, blockingPet.pod.Name) } petManager := psc.newSyncer(blockingPet) numPets := 0 @@ -351,7 +351,7 @@ func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, switch err.(type) { case errUnhealthyPet: // We are not passing this error up, but we don't increment numPets if we encounter it, - // since numPets directly translates to petset.status.replicas + // since numPets directly translates to statefulset.status.replicas continue case nil: continue @@ -364,7 +364,7 @@ func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, it.errs = append(it.errs, err) } // TODO: GC pvcs. We can't delete them per pet because of grace period, and - // in fact we *don't want to* till petset is stable to guarantee that bugs + // in fact we *don't want to* till statefulset is stable to guarantee that bugs // in the controller don't corrupt user data. 
return numPets, errors.NewAggregate(it.errs) } diff --git a/pkg/controller/petset/pet_set_test.go b/pkg/controller/petset/pet_set_test.go index 91f266c3712..4f869c1f331 100644 --- a/pkg/controller/petset/pet_set_test.go +++ b/pkg/controller/petset/pet_set_test.go @@ -32,13 +32,13 @@ import ( "k8s.io/kubernetes/pkg/util/errors" ) -func newFakePetSetController() (*PetSetController, *fakePetClient) { +func newFakeStatefulSetController() (*StatefulSetController, *fakePetClient) { fpc := newFakePetClient() - return &PetSetController{ + return &StatefulSetController{ kubeClient: nil, blockingPetStore: newUnHealthyPetTracker(fpc), podStoreSynced: func() bool { return true }, - psStore: cache.StoreToPetSetLister{Store: cache.NewStore(controller.KeyFunc)}, + psStore: cache.StoreToStatefulSetLister{Store: cache.NewStore(controller.KeyFunc)}, podStore: cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})}, newSyncer: func(blockingPet *pcb) *petSyncer { return &petSyncer{fpc, blockingPet} @@ -46,7 +46,7 @@ func newFakePetSetController() (*PetSetController, *fakePetClient) { }, fpc } -func checkPets(ps *apps.PetSet, creates, deletes int, fc *fakePetClient, t *testing.T) { +func checkPets(ps *apps.StatefulSet, creates, deletes int, fc *fakePetClient, t *testing.T) { if fc.petsCreated != creates || fc.petsDeleted != deletes { t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes) } @@ -57,12 +57,12 @@ func checkPets(ps *apps.PetSet, creates, deletes int, fc *fakePetClient, t *test for i := range fc.pets { expectedPet, _ := newPCB(fmt.Sprintf("%v", i), ps) if identityHash(ps, fc.pets[i].pod) != identityHash(ps, expectedPet.pod) { - t.Errorf("Unexpected pet at index %d", i) + t.Errorf("Unexpected pod at index %d", i) } for _, pvc := range expectedPet.pvcs { gotPVC, ok := gotClaims[pvc.Name] if !ok { - t.Errorf("PVC %v not created for pet %v", pvc.Name, expectedPet.pod.Name) + t.Errorf("PVC %v not created for pod %v", pvc.Name, expectedPet.pod.Name) } if !reflect.DeepEqual(gotPVC.Spec, pvc.Spec) { t.Errorf("got PVC %v differs from created pvc", pvc.Name) @@ -71,14 +71,14 @@ func checkPets(ps *apps.PetSet, creates, deletes int, fc *fakePetClient, t *test } } -func scalePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient, scale int) error { +func scaleStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient, scale int) error { errs := []error{} for i := 0; i < scale; i++ { pl := fc.getPodList() if len(pl) != i { - t.Errorf("Unexpected number of pets, expected %d found %d", i, len(pl)) + t.Errorf("Unexpected number of pods, expected %d found %d", i, len(pl)) } - if _, syncErr := psc.syncPetSet(ps, pl); syncErr != nil { + if _, syncErr := psc.syncStatefulSet(ps, pl); syncErr != nil { errs = append(errs, syncErr) } fc.setHealthy(i) @@ -87,35 +87,35 @@ func scalePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakeP return errors.NewAggregate(errs) } -func saturatePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient) { - err := scalePetSet(t, ps, psc, fc, int(ps.Spec.Replicas)) +func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) { + err := scaleStatefulSet(t, ps, psc, fc, int(ps.Spec.Replicas)) if err != nil { - t.Errorf("Error scalePetSet: %v", err) + t.Errorf("Error scaleStatefulSet: %v", err) } } -func TestPetSetControllerCreates(t 
*testing.T) { - psc, fc := newFakePetSetController() +func TestStatefulSetControllerCreates(t *testing.T) { + psc, fc := newFakeStatefulSetController() replicas := 3 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) - saturatePetSet(t, ps, psc, fc) + saturateStatefulSet(t, ps, psc, fc) podList := fc.getPodList() // Deleted pet gets recreated fc.pets = fc.pets[:replicas-1] - if _, err := psc.syncPetSet(ps, podList); err != nil { - t.Errorf("Error syncing PetSet: %v", err) + if _, err := psc.syncStatefulSet(ps, podList); err != nil { + t.Errorf("Error syncing StatefulSet: %v", err) } checkPets(ps, replicas+1, 0, fc, t) } -func TestPetSetControllerDeletes(t *testing.T) { - psc, fc := newFakePetSetController() +func TestStatefulSetControllerDeletes(t *testing.T) { + psc, fc := newFakeStatefulSetController() replicas := 4 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) - saturatePetSet(t, ps, psc, fc) + saturateStatefulSet(t, ps, psc, fc) // Drain errs := []error{} @@ -123,30 +123,30 @@ func TestPetSetControllerDeletes(t *testing.T) { knownPods := fc.getPodList() for i := replicas - 1; i >= 0; i-- { if len(fc.pets) != i+1 { - t.Errorf("Unexpected number of pets, expected %d found %d", i+1, len(fc.pets)) + t.Errorf("Unexpected number of pods, expected %d found %d", i+1, len(fc.pets)) } - if _, syncErr := psc.syncPetSet(ps, knownPods); syncErr != nil { + if _, syncErr := psc.syncStatefulSet(ps, knownPods); syncErr != nil { errs = append(errs, syncErr) } } if len(errs) != 0 { - t.Errorf("Error syncing PetSet: %v", errors.NewAggregate(errs)) + t.Errorf("Error syncing StatefulSet: %v", errors.NewAggregate(errs)) } checkPets(ps, replicas, replicas, fc, t) } -func TestPetSetControllerRespectsTermination(t *testing.T) { - psc, fc := newFakePetSetController() +func TestStatefulSetControllerRespectsTermination(t *testing.T) { + psc, fc := newFakeStatefulSetController() replicas := 4 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) - saturatePetSet(t, ps, psc, fc) + saturateStatefulSet(t, ps, psc, fc) fc.setDeletionTimestamp(replicas - 1) ps.Spec.Replicas = 2 - _, err := psc.syncPetSet(ps, fc.getPodList()) + _, err := psc.syncStatefulSet(ps, fc.getPodList()) if err != nil { - t.Errorf("Error syncing PetSet: %v", err) + t.Errorf("Error syncing StatefulSet: %v", err) } // Finding a pod with the deletion timestamp will pause all deletions. 
knownPods := fc.getPodList() @@ -154,19 +154,19 @@ func TestPetSetControllerRespectsTermination(t *testing.T) { t.Errorf("Pods deleted prematurely before deletion timestamp expired, len %d", len(knownPods)) } fc.pets = fc.pets[:replicas-1] - _, err = psc.syncPetSet(ps, fc.getPodList()) + _, err = psc.syncStatefulSet(ps, fc.getPodList()) if err != nil { - t.Errorf("Error syncing PetSet: %v", err) + t.Errorf("Error syncing StatefulSet: %v", err) } checkPets(ps, replicas, 1, fc, t) } -func TestPetSetControllerRespectsOrder(t *testing.T) { - psc, fc := newFakePetSetController() +func TestStatefulSetControllerRespectsOrder(t *testing.T) { + psc, fc := newFakeStatefulSetController() replicas := 4 - ps := newPetSet(replicas) + ps := newStatefulSet(replicas) - saturatePetSet(t, ps, psc, fc) + saturateStatefulSet(t, ps, psc, fc) errs := []error{} ps.Spec.Replicas = 0 @@ -179,36 +179,36 @@ func TestPetSetControllerRespectsOrder(t *testing.T) { for i := 0; i < replicas; i++ { if len(fc.pets) != replicas-i { - t.Errorf("Unexpected number of pets, expected %d found %d", i, len(fc.pets)) + t.Errorf("Unexpected number of pods, expected %d found %d", i, len(fc.pets)) } - if _, syncErr := psc.syncPetSet(ps, knownPods); syncErr != nil { + if _, syncErr := psc.syncStatefulSet(ps, knownPods); syncErr != nil { errs = append(errs, syncErr) } checkPets(ps, replicas, i+1, fc, t) } if len(errs) != 0 { - t.Errorf("Error syncing PetSet: %v", errors.NewAggregate(errs)) + t.Errorf("Error syncing StatefulSet: %v", errors.NewAggregate(errs)) } } -func TestPetSetControllerBlocksScaling(t *testing.T) { - psc, fc := newFakePetSetController() +func TestStatefulSetControllerBlocksScaling(t *testing.T) { + psc, fc := newFakeStatefulSetController() replicas := 5 - ps := newPetSet(replicas) - scalePetSet(t, ps, psc, fc, 3) + ps := newStatefulSet(replicas) + scaleStatefulSet(t, ps, psc, fc, 3) // Create 4th pet, then before flipping it to healthy, kill the first pet. // There should only be 1 not-healty pet at a time. 
pl := fc.getPodList() - if _, err := psc.syncPetSet(ps, pl); err != nil { - t.Errorf("Error syncing PetSet: %v", err) + if _, err := psc.syncStatefulSet(ps, pl); err != nil { + t.Errorf("Error syncing StatefulSet: %v", err) } deletedPod := pl[0] fc.deletePetAtIndex(0) pl = fc.getPodList() - if _, err := psc.syncPetSet(ps, pl); err != nil { - t.Errorf("Error syncing PetSet: %v", err) + if _, err := psc.syncStatefulSet(ps, pl); err != nil { + t.Errorf("Error syncing StatefulSet: %v", err) } newPodList := fc.getPodList() for _, p := range newPodList { @@ -218,8 +218,8 @@ func TestPetSetControllerBlocksScaling(t *testing.T) { } fc.setHealthy(len(newPodList) - 1) - if _, err := psc.syncPetSet(ps, pl); err != nil { - t.Errorf("Error syncing PetSet: %v", err) + if _, err := psc.syncStatefulSet(ps, pl); err != nil { + t.Errorf("Error syncing StatefulSet: %v", err) } found := false @@ -234,55 +234,55 @@ func TestPetSetControllerBlocksScaling(t *testing.T) { } } -func TestPetSetBlockingPetIsCleared(t *testing.T) { - psc, fc := newFakePetSetController() - ps := newPetSet(3) - scalePetSet(t, ps, psc, fc, 1) +func TestStatefulSetBlockingPetIsCleared(t *testing.T) { + psc, fc := newFakeStatefulSetController() + ps := newStatefulSet(3) + scaleStatefulSet(t, ps, psc, fc, 1) if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil { - t.Errorf("Unexpected blocking pet %v, err %v", blocking, err) + t.Errorf("Unexpected blocking pod %v, err %v", blocking, err) } // 1 not yet healthy pet - psc.syncPetSet(ps, fc.getPodList()) + psc.syncStatefulSet(ps, fc.getPodList()) if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking == nil { - t.Errorf("Expected blocking pet %v, err %v", blocking, err) + t.Errorf("Expected blocking pod %v, err %v", blocking, err) } - // Deleting the petset should clear the blocking pet + // Deleting the statefulset should clear the blocking pet if err := psc.psStore.Store.Delete(ps); err != nil { - t.Fatalf("Unable to delete pet %v from petset controller store.", ps.Name) + t.Fatalf("Unable to delete pod %v from statefulset controller store.", ps.Name) } if err := psc.Sync(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil { - t.Errorf("Error during sync of deleted petset %v", err) + t.Errorf("Error during sync of deleted statefulset %v", err) } fc.pets = []*pcb{} fc.petsCreated = 0 if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil { - t.Errorf("Unexpected blocking pet %v, err %v", blocking, err) + t.Errorf("Unexpected blocking pod %v, err %v", blocking, err) } - saturatePetSet(t, ps, psc, fc) + saturateStatefulSet(t, ps, psc, fc) // Make sure we don't leak the final blockin pet in the store - psc.syncPetSet(ps, fc.getPodList()) + psc.syncStatefulSet(ps, fc.getPodList()) if p, exists, err := psc.blockingPetStore.store.GetByKey(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil || exists { - t.Errorf("Unexpected blocking pet, err %v: %+v", err, p) + t.Errorf("Unexpected blocking pod, err %v: %+v", err, p) } } -func TestSyncPetSetBlockedPet(t *testing.T) { - psc, fc := newFakePetSetController() - ps := newPetSet(3) - i, _ := psc.syncPetSet(ps, fc.getPodList()) +func TestSyncStatefulSetBlockedPet(t *testing.T) { + psc, fc := newFakeStatefulSetController() + ps := newStatefulSet(3) + i, _ := psc.syncStatefulSet(ps, fc.getPodList()) if i != len(fc.getPodList()) { - t.Errorf("syncPetSet should return actual amount of pods") + t.Errorf("syncStatefulSet should 
return actual amount of pods") } } type fakeClient struct { fake_internal.Clientset - petSetClient *fakePetSetClient + statefulsetClient *fakeStatefulSetClient } func (c *fakeClient) Apps() unversioned.AppsInterface { @@ -294,38 +294,38 @@ type fakeApps struct { *fake.FakeApps } -func (c *fakeApps) PetSets(namespace string) unversioned.PetSetInterface { - c.petSetClient.Namespace = namespace - return c.petSetClient +func (c *fakeApps) StatefulSets(namespace string) unversioned.StatefulSetInterface { + c.statefulsetClient.Namespace = namespace + return c.statefulsetClient } -type fakePetSetClient struct { - *fake.FakePetSets +type fakeStatefulSetClient struct { + *fake.FakeStatefulSets Namespace string replicas int32 } -func (f *fakePetSetClient) UpdateStatus(petSet *apps.PetSet) (*apps.PetSet, error) { - f.replicas = petSet.Status.Replicas - return petSet, nil +func (f *fakeStatefulSetClient) UpdateStatus(statefulset *apps.StatefulSet) (*apps.StatefulSet, error) { + f.replicas = statefulset.Status.Replicas + return statefulset, nil } -func TestPetSetReplicaCount(t *testing.T) { - fpsc := &fakePetSetClient{} - psc, _ := newFakePetSetController() +func TestStatefulSetReplicaCount(t *testing.T) { + fpsc := &fakeStatefulSetClient{} + psc, _ := newFakeStatefulSetController() psc.kubeClient = &fakeClient{ - petSetClient: fpsc, + statefulsetClient: fpsc, } - ps := newPetSet(3) + ps := newStatefulSet(3) psKey := fmt.Sprintf("%v/%v", ps.Namespace, ps.Name) psc.psStore.Store.Add(ps) if err := psc.Sync(psKey); err != nil { - t.Errorf("Error during sync of deleted petset %v", err) + t.Errorf("Error during sync of deleted statefulset %v", err) } if fpsc.replicas != 1 { - t.Errorf("Replicas count sent as status update for PetSet should be 1, is %d instead", fpsc.replicas) + t.Errorf("Replicas count sent as status update for StatefulSet should be 1, is %d instead", fpsc.replicas) } } diff --git a/pkg/controller/petset/pet_set_utils.go b/pkg/controller/petset/pet_set_utils.go index 1bcc84368ab..1b912a4cf6c 100644 --- a/pkg/controller/petset/pet_set_utils.go +++ b/pkg/controller/petset/pet_set_utils.go @@ -29,55 +29,55 @@ import ( "github.com/golang/glog" ) -// overlappingPetSets sorts a list of PetSets by creation timestamp, using their names as a tie breaker. -// Generally used to tie break between PetSets that have overlapping selectors. -type overlappingPetSets []apps.PetSet +// overlappingStatefulSets sorts a list of StatefulSets by creation timestamp, using their names as a tie breaker. +// Generally used to tie break between StatefulSets that have overlapping selectors. +type overlappingStatefulSets []apps.StatefulSet -func (o overlappingPetSets) Len() int { return len(o) } -func (o overlappingPetSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o overlappingStatefulSets) Len() int { return len(o) } +func (o overlappingStatefulSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o overlappingPetSets) Less(i, j int) bool { +func (o overlappingStatefulSets) Less(i, j int) bool { if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { return o[i].Name < o[j].Name } return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) } -// updatePetCount attempts to update the Status.Replicas of the given PetSet, with a single GET/PUT retry. -func updatePetCount(psClient appsclientset.PetSetsGetter, ps apps.PetSet, numPets int) (updateErr error) { +// updatePetCount attempts to update the Status.Replicas of the given StatefulSet, with a single GET/PUT retry. 
+func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulSet, numPets int) (updateErr error) { if ps.Status.Replicas == int32(numPets) || psClient == nil { return nil } var getErr error for i, ps := 0, &ps; ; i++ { - glog.V(4).Infof(fmt.Sprintf("Updating replica count for PetSet: %s/%s, ", ps.Namespace, ps.Name) + + glog.V(4).Infof(fmt.Sprintf("Updating replica count for StatefulSet: %s/%s, ", ps.Namespace, ps.Name) + fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas)) - ps.Status = apps.PetSetStatus{Replicas: int32(numPets)} - _, updateErr = psClient.PetSets(ps.Namespace).UpdateStatus(ps) + ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)} + _, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps) if updateErr == nil || i >= statusUpdateRetries { return updateErr } - if ps, getErr = psClient.PetSets(ps.Namespace).Get(ps.Name); getErr != nil { + if ps, getErr = psClient.StatefulSets(ps.Namespace).Get(ps.Name); getErr != nil { return getErr } } } -// unhealthyPetTracker tracks unhealthy pets for petsets. +// unhealthyPetTracker tracks unhealthy pets for statefulsets. type unhealthyPetTracker struct { pc petClient store cache.Store storeLock sync.Mutex } -// Get returns a previously recorded blocking pet for the given petset. -func (u *unhealthyPetTracker) Get(ps *apps.PetSet, knownPets []*api.Pod) (*pcb, error) { +// Get returns a previously recorded blocking pet for the given statefulset. +func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*api.Pod) (*pcb, error) { u.storeLock.Lock() defer u.storeLock.Unlock() // We "Get" by key but "Add" by object because the store interface doesn't - // allow us to Get/Add a related obj (eg petset: blocking pet). + // allow us to Get/Add a related obj (eg statefulset: blocking pet). key, err := controller.KeyFunc(ps) if err != nil { return nil, err @@ -93,16 +93,16 @@ func (u *unhealthyPetTracker) Get(ps *apps.PetSet, knownPets []*api.Pod) (*pcb, if !exists { for _, p := range knownPets { if hc.isHealthy(p) && !hc.isDying(p) { - glog.V(4).Infof("Ignoring healthy pet %v for PetSet %v", p.Name, ps.Name) + glog.V(4).Infof("Ignoring healthy pod %v for StatefulSet %v", p.Name, ps.Name) continue } - glog.Infof("No recorded blocking pet, but found unhealthy pet %v for PetSet %v", p.Name, ps.Name) + glog.Infof("No recorded blocking pod, but found unhealthy pod %v for StatefulSet %v", p.Name, ps.Name) return &pcb{pod: p, parent: ps}, nil } return nil, nil } - // This is a pet that's blocking further creates/deletes of a petset. If it + // This is a pet that's blocking further creates/deletes of a statefulset. If it // disappears, it's no longer blocking. If it exists, it continues to block // till it turns healthy or disappears. 
bp := obj.(*pcb) @@ -111,12 +111,12 @@ func (u *unhealthyPetTracker) Get(ps *apps.PetSet, knownPets []*api.Pod) (*pcb, return nil, err } if !exists { - glog.V(4).Infof("Clearing blocking pet %v for PetSet %v because it's been deleted", bp.pod.Name, ps.Name) + glog.V(4).Infof("Clearing blocking pod %v for StatefulSet %v because it's been deleted", bp.pod.Name, ps.Name) return nil, nil } blockingPetPod := blockingPet.pod if hc.isHealthy(blockingPetPod) && !hc.isDying(blockingPetPod) { - glog.V(4).Infof("Clearing blocking pet %v for PetSet %v because it's healthy", bp.pod.Name, ps.Name) + glog.V(4).Infof("Clearing blocking pod %v for StatefulSet %v because it's healthy", bp.pod.Name, ps.Name) u.store.Delete(blockingPet) blockingPet = nil } @@ -131,11 +131,11 @@ func (u *unhealthyPetTracker) Add(blockingPet *pcb) error { if blockingPet == nil { return nil } - glog.V(4).Infof("Adding blocking pet %v for PetSet %v", blockingPet.pod.Name, blockingPet.parent.Name) + glog.V(4).Infof("Adding blocking pod %v for StatefulSet %v", blockingPet.pod.Name, blockingPet.parent.Name) return u.store.Add(blockingPet) } -// newUnHealthyPetTracker tracks unhealthy pets that block progress of petsets. +// newUnHealthyPetTracker tracks unhealthy pets that block progress of statefulsets. func newUnHealthyPetTracker(pc petClient) *unhealthyPetTracker { return &unhealthyPetTracker{pc: pc, store: cache.NewStore(pcbKeyFunc)} } @@ -148,10 +148,10 @@ func pcbKeyFunc(obj interface{}) (string, error) { } p, ok := obj.(*pcb) if !ok { - return "", fmt.Errorf("not a valid pet control block %#v", p) + return "", fmt.Errorf("not a valid pod control block %#v", p) } if p.parent == nil { - return "", fmt.Errorf("cannot compute pet control block key without parent pointer %#v", p) + return "", fmt.Errorf("cannot compute pod control block key without parent pointer %#v", p) } return controller.KeyFunc(p.parent) } diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index d1babebef5c..4209fe71889 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -183,7 +183,7 @@ __custom_func() { * limitranges (aka 'limits') * nodes (aka 'no') * namespaces (aka 'ns') - * petsets (alpha feature, may be unstable) + * statefulsets (alpha feature, may be unstable) * pods (aka 'po') * persistentvolumes (aka 'pv') * persistentvolumeclaims (aka 'pvc') diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index bae3a24b81c..287a89e6c1d 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -863,7 +863,7 @@ func (f *factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*api.PodSpe return true, fn(&t.Spec.Template.Spec) case *extensions.ReplicaSet: return true, fn(&t.Spec.Template.Spec) - case *apps.PetSet: + case *apps.StatefulSet: return true, fn(&t.Spec.Template.Spec) case *batch.Job: return true, fn(&t.Spec.Template.Spec) diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index e51dc031059..6d466691486 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -736,12 +736,12 @@ func TestDiscoveryReplaceAliases(t *testing.T) { { name: "all-replacement", arg: "all", - expected: "pods,replicationcontrollers,services,petsets,horizontalpodautoscalers,jobs,deployments,replicasets", + expected: "pods,replicationcontrollers,services,statefulsets,horizontalpodautoscalers,jobs,deployments,replicasets", }, { name: "alias-in-comma-separated-arg", arg: "all,secrets", - expected: 
"pods,replicationcontrollers,services,petsets,horizontalpodautoscalers,jobs,deployments,replicasets,secrets", + expected: "pods,replicationcontrollers,services,statefulsets,horizontalpodautoscalers,jobs,deployments,replicasets,secrets", }, } diff --git a/pkg/kubectl/cmd/util/shortcut_restmapper.go b/pkg/kubectl/cmd/util/shortcut_restmapper.go index 60c781d8174..e807c32b75f 100644 --- a/pkg/kubectl/cmd/util/shortcut_restmapper.go +++ b/pkg/kubectl/cmd/util/shortcut_restmapper.go @@ -111,7 +111,7 @@ var userResources = []unversioned.GroupResource{ {Group: "", Resource: "pods"}, {Group: "", Resource: "replicationcontrollers"}, {Group: "", Resource: "services"}, - {Group: "apps", Resource: "petsets"}, + {Group: "apps", Resource: "statefulsets"}, {Group: "autoscaling", Resource: "horizontalpodautoscalers"}, {Group: "extensions", Resource: "jobs"}, {Group: "extensions", Resource: "deployments"}, diff --git a/pkg/kubectl/cmd/util/shortcut_restmapper_test.go b/pkg/kubectl/cmd/util/shortcut_restmapper_test.go index be445c03623..20d671ec8e4 100644 --- a/pkg/kubectl/cmd/util/shortcut_restmapper_test.go +++ b/pkg/kubectl/cmd/util/shortcut_restmapper_test.go @@ -37,12 +37,12 @@ func TestReplaceAliases(t *testing.T) { { name: "all-replacement", arg: "all", - expected: "pods,replicationcontrollers,services,petsets,horizontalpodautoscalers,jobs,deployments,replicasets", + expected: "pods,replicationcontrollers,services,statefulsets,horizontalpodautoscalers,jobs,deployments,replicasets", }, { name: "alias-in-comma-separated-arg", arg: "all,secrets", - expected: "pods,replicationcontrollers,services,petsets,horizontalpodautoscalers,jobs,deployments,replicasets,secrets", + expected: "pods,replicationcontrollers,services,statefulsets,horizontalpodautoscalers,jobs,deployments,replicasets,secrets", }, } diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index e4342d00218..d96ba6466f1 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -117,7 +117,7 @@ func describerMap(c clientset.Interface) map[unversioned.GroupKind]Describer { extensions.Kind("Ingress"): &IngressDescriber{c}, batch.Kind("Job"): &JobDescriber{c}, batch.Kind("ScheduledJob"): &ScheduledJobDescriber{c}, - apps.Kind("PetSet"): &PetSetDescriber{c}, + apps.Kind("StatefulSet"): &StatefulSetDescriber{c}, certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, storage.Kind("StorageClass"): &StorageClassDescriber{c}, } @@ -1863,12 +1863,12 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events }) } -type PetSetDescriber struct { +type StatefulSetDescriber struct { client clientset.Interface } -func (p *PetSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - ps, err := p.client.Apps().PetSets(namespace).Get(name) +func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ps, err := p.client.Apps().StatefulSets(namespace).Get(name) if err != nil { return "", err } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 2e62cdbab08..e2736ba21f8 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -470,7 +470,7 @@ var ( scheduledJobColumns = []string{"NAME", "SCHEDULE", "SUSPEND", "ACTIVE", "LAST-SCHEDULE"} serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"} ingressColumns = []string{"NAME", "HOSTS", "ADDRESS", "PORTS", "AGE"} - petSetColumns = []string{"NAME", 
"DESIRED", "CURRENT", "AGE"} + statefulSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} nodeColumns = []string{"NAME", "STATUS", "AGE"} daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "NODE-SELECTOR", "AGE"} @@ -539,8 +539,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(serviceColumns, printServiceList) h.Handler(ingressColumns, printIngress) h.Handler(ingressColumns, printIngressList) - h.Handler(petSetColumns, printPetSet) - h.Handler(petSetColumns, printPetSetList) + h.Handler(statefulSetColumns, printStatefulSet) + h.Handler(statefulSetColumns, printStatefulSetList) h.Handler(endpointColumns, printEndpoints) h.Handler(endpointColumns, printEndpointsList) h.Handler(nodeColumns, printNode) @@ -1227,7 +1227,7 @@ func printIngressList(ingressList *extensions.IngressList, w io.Writer, options return nil } -func printPetSet(ps *apps.PetSet, w io.Writer, options PrintOptions) error { +func printStatefulSet(ps *apps.StatefulSet, w io.Writer, options PrintOptions) error { name := formatResourceName(options.Kind, ps.Name, options.WithKind) namespace := ps.Namespace @@ -1266,9 +1266,9 @@ func printPetSet(ps *apps.PetSet, w io.Writer, options PrintOptions) error { return nil } -func printPetSetList(petSetList *apps.PetSetList, w io.Writer, options PrintOptions) error { - for _, ps := range petSetList.Items { - if err := printPetSet(&ps, w, options); err != nil { +func printStatefulSetList(statefulSetList *apps.StatefulSetList, w io.Writer, options PrintOptions) error { + for _, ps := range statefulSetList.Items { + if err := printStatefulSet(&ps, w, options); err != nil { return err } } diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index a79d0e52d86..f97a5d4af10 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -57,8 +57,8 @@ func ScalerFor(kind unversioned.GroupKind, c internalclientset.Interface) (Scale return &ReplicaSetScaler{c.Extensions()}, nil case extensions.Kind("Job"), batch.Kind("Job"): return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. - case apps.Kind("PetSet"): - return &PetSetScaler{c.Apps()}, nil + case apps.Kind("StatefulSet"): + return &StatefulSetScaler{c.Apps()}, nil case extensions.Kind("Deployment"): return &DeploymentScaler{c.Extensions()}, nil } @@ -137,8 +137,8 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s } } -// ValidatePetSet ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidatePetSet(ps *apps.PetSet) error { +// ValidateStatefulSet ensures that the preconditions match. Returns nil if they are valid, an error otherwise. +func (precondition *ScalePrecondition) ValidateStatefulSet(ps *apps.StatefulSet) error { if precondition.Size != -1 && int(ps.Spec.Replicas) != precondition.Size { return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(ps.Spec.Replicas))} } @@ -328,34 +328,34 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { return nil } -type PetSetScaler struct { - c appsclient.PetSetsGetter +type StatefulSetScaler struct { + c appsclient.StatefulSetsGetter } // ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the petset if the update is successful. 
-func (scaler *PetSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - ps, err := scaler.c.PetSets(namespace).Get(name) +// resourceVersion of the statefulset if the update is successful. +func (scaler *StatefulSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { + ps, err := scaler.c.StatefulSets(namespace).Get(name) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } if preconditions != nil { - if err := preconditions.ValidatePetSet(ps); err != nil { + if err := preconditions.ValidateStatefulSet(ps); err != nil { return "", err } } ps.Spec.Replicas = int32(newSize) - updatedPetSet, err := scaler.c.PetSets(namespace).Update(ps) + updatedStatefulSet, err := scaler.c.StatefulSets(namespace).Update(ps) if err != nil { if errors.IsConflict(err) { return "", ScaleError{ScaleUpdateConflictFailure, ps.ResourceVersion, err} } return "", ScaleError{ScaleUpdateFailure, ps.ResourceVersion, err} } - return updatedPetSet.ResourceVersion, nil + return updatedStatefulSet.ResourceVersion, nil } -func (scaler *PetSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { +func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { if preconditions == nil { preconditions = &ScalePrecondition{-1, ""} } @@ -368,11 +368,11 @@ func (scaler *PetSetScaler) Scale(namespace, name string, newSize uint, precondi return err } if waitForReplicas != nil { - job, err := scaler.c.PetSets(namespace).Get(name) + job, err := scaler.c.StatefulSets(namespace).Get(name) if err != nil { return err } - err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.PetSetHasDesiredPets(scaler.c, job)) + err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredPets(scaler.c, job)) if err == wait.ErrWaitTimeout { return fmt.Errorf("timed out waiting for %q to be synced", name) } diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index d252705f351..90eea0c9cbc 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -87,8 +87,8 @@ func ReaperFor(kind unversioned.GroupKind, c internalclientset.Interface) (Reape case extensions.Kind("Job"), batch.Kind("Job"): return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil - case apps.Kind("PetSet"): - return &PetSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil + case apps.Kind("StatefulSet"): + return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil case extensions.Kind("Deployment"): return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil @@ -129,8 +129,8 @@ type PodReaper struct { type ServiceReaper struct { client coreclient.ServicesGetter } -type PetSetReaper struct { - client appsclient.PetSetsGetter +type StatefulSetReaper struct { + client appsclient.StatefulSetsGetter podClient coreclient.PodsGetter pollInterval, timeout time.Duration } @@ -325,10 +325,10 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio return reaper.client.DaemonSets(namespace).Delete(name, nil) } -func (reaper *PetSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - petsets := reaper.client.PetSets(namespace) - scaler := &PetSetScaler{reaper.client} - ps, err := petsets.Get(name) +func (reaper *StatefulSetReaper) 
Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + statefulsets := reaper.client.StatefulSets(namespace) + scaler := &StatefulSetScaler{reaper.client} + ps, err := statefulsets.Get(name) if err != nil { return err } @@ -337,13 +337,13 @@ func (reaper *PetSetReaper) Stop(namespace, name string, timeout time.Duration, timeout = Timeout + time.Duration(10*numPets)*time.Second } retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForPetSet := NewRetryParams(reaper.pollInterval, reaper.timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForPetSet); err != nil { + waitForStatefulSet := NewRetryParams(reaper.pollInterval, reaper.timeout) + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil { return err } - // TODO: This shouldn't be needed, see corresponding TODO in PetSetHasDesiredPets. - // PetSet should track generation number. + // TODO: This shouldn't be needed, see corresponding TODO in StatefulSetHasDesiredPets. + // StatefulSet should track generation number. pods := reaper.podClient.Pods(namespace) selector, _ := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) options := api.ListOptions{LabelSelector: selector} @@ -365,8 +365,8 @@ func (reaper *PetSetReaper) Stop(namespace, name string, timeout time.Duration, } // TODO: Cleanup volumes? We don't want to accidentally delete volumes from - // stop, so just leave this up to the petset. - return petsets.Delete(name, nil) + // stop, so just leave this up to the statefulset. + return statefulsets.Delete(name, nil) } func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { diff --git a/pkg/registry/apps/petset/etcd/etcd.go b/pkg/registry/apps/petset/etcd/etcd.go index bf2efd42705..634cbda3bfa 100644 --- a/pkg/registry/apps/petset/etcd/etcd.go +++ b/pkg/registry/apps/petset/etcd/etcd.go @@ -37,11 +37,11 @@ type REST struct { func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) { prefix := "/" + opts.ResourcePrefix - newListFunc := func() runtime.Object { return &appsapi.PetSetList{} } + newListFunc := func() runtime.Object { return &appsapi.StatefulSetList{} } storageInterface, dFunc := opts.Decorator( opts.StorageConfig, - cachesize.GetWatchCacheSizeByResource(cachesize.PetSet), - &appsapi.PetSet{}, + cachesize.GetWatchCacheSizeByResource(cachesize.StatefulSet), + &appsapi.StatefulSet{}, prefix, petset.Strategy, newListFunc, @@ -49,27 +49,27 @@ func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) { ) store := ®istry.Store{ - NewFunc: func() runtime.Object { return &appsapi.PetSet{} }, + NewFunc: func() runtime.Object { return &appsapi.StatefulSet{} }, // NewListFunc returns an object capable of storing results of an etcd list. 
NewListFunc: newListFunc, - // Produces a petSet that etcd understands, to the root of the resource + // Produces a statefulSet that etcd understands, to the root of the resource // by combining the namespace in the context with the given prefix KeyRootFunc: func(ctx api.Context) string { return registry.NamespaceKeyRootFunc(ctx, prefix) }, - // Produces a petSet that etcd understands, to the resource by combining + // Produces a statefulSet that etcd understands, to the resource by combining // the namespace in the context with the given prefix KeyFunc: func(ctx api.Context, name string) (string, error) { return registry.NamespaceKeyFunc(ctx, prefix, name) }, // Retrieve the name field of a replication controller ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*appsapi.PetSet).Name, nil + return obj.(*appsapi.StatefulSet).Name, nil }, // Used to match objects based on labels/fields for list and watch - PredicateFunc: petset.MatchPetSet, - QualifiedResource: appsapi.Resource("petsets"), + PredicateFunc: petset.MatchStatefulSet, + QualifiedResource: appsapi.Resource("statefulsets"), EnableGarbageCollection: opts.EnableGarbageCollection, DeleteCollectionWorkers: opts.DeleteCollectionWorkers, @@ -88,13 +88,13 @@ func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) { return &REST{store}, &StatusREST{store: &statusStore} } -// StatusREST implements the REST endpoint for changing the status of an petSet +// StatusREST implements the REST endpoint for changing the status of an statefulSet type StatusREST struct { store *registry.Store } func (r *StatusREST) New() runtime.Object { - return &appsapi.PetSet{} + return &appsapi.StatefulSet{} } // Get retrieves the object from the storage. It is required to support Patch. diff --git a/pkg/registry/apps/petset/etcd/etcd_test.go b/pkg/registry/apps/petset/etcd/etcd_test.go index 38ef4033769..090c79b755b 100644 --- a/pkg/registry/apps/petset/etcd/etcd_test.go +++ b/pkg/registry/apps/petset/etcd/etcd_test.go @@ -33,30 +33,30 @@ import ( func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) { etcdStorage, server := registrytest.NewEtcdStorage(t, apps.GroupName) - restOptions := generic.RESTOptions{StorageConfig: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1, ResourcePrefix: "petsets"} - petSetStorage, statusStorage := NewREST(restOptions) - return petSetStorage, statusStorage, server + restOptions := generic.RESTOptions{StorageConfig: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1, ResourcePrefix: "statefulsets"} + statefulSetStorage, statusStorage := NewREST(restOptions) + return statefulSetStorage, statusStorage, server } -// createPetSet is a helper function that returns a PetSet with the updated resource version. -func createPetSet(storage *REST, ps apps.PetSet, t *testing.T) (apps.PetSet, error) { +// createStatefulSet is a helper function that returns a StatefulSet with the updated resource version. 
+func createStatefulSet(storage *REST, ps apps.StatefulSet, t *testing.T) (apps.StatefulSet, error) { ctx := api.WithNamespace(api.NewContext(), ps.Namespace) obj, err := storage.Create(ctx, &ps) if err != nil { - t.Errorf("Failed to create PetSet, %v", err) + t.Errorf("Failed to create StatefulSet, %v", err) } - newPS := obj.(*apps.PetSet) + newPS := obj.(*apps.StatefulSet) return *newPS, nil } -func validNewPetSet() *apps.PetSet { - return &apps.PetSet{ +func validNewStatefulSet() *apps.StatefulSet { + return &apps.StatefulSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, Labels: map[string]string{"a": "b"}, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"a": "b"}}, Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -76,7 +76,7 @@ func validNewPetSet() *apps.PetSet { }, Replicas: 7, }, - Status: apps.PetSetStatus{}, + Status: apps.StatefulSetStatus{}, } } @@ -85,7 +85,7 @@ func TestCreate(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - ps := validNewPetSet() + ps := validNewStatefulSet() ps.ObjectMeta = api.ObjectMeta{} test.TestCreate( // valid @@ -101,17 +101,17 @@ func TestStatusUpdate(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() ctx := api.WithNamespace(api.NewContext(), api.NamespaceDefault) - key := etcdtest.AddPrefix("/petsets/" + api.NamespaceDefault + "/foo") - validPetSet := validNewPetSet() - if err := storage.Storage.Create(ctx, key, validPetSet, nil, 0); err != nil { + key := etcdtest.AddPrefix("/statefulsets/" + api.NamespaceDefault + "/foo") + validStatefulSet := validNewStatefulSet() + if err := storage.Storage.Create(ctx, key, validStatefulSet, nil, 0); err != nil { t.Fatalf("unexpected error: %v", err) } - update := apps.PetSet{ - ObjectMeta: validPetSet.ObjectMeta, - Spec: apps.PetSetSpec{ + update := apps.StatefulSet{ + ObjectMeta: validStatefulSet.ObjectMeta, + Spec: apps.StatefulSetSpec{ Replicas: 7, }, - Status: apps.PetSetStatus{ + Status: apps.StatefulSetStatus{ Replicas: 7, }, } @@ -124,7 +124,7 @@ func TestStatusUpdate(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - ps := obj.(*apps.PetSet) + ps := obj.(*apps.StatefulSet) if ps.Spec.Replicas != 7 { t.Errorf("we expected .spec.replicas to not be updated but it was updated to %v", ps.Spec.Replicas) } @@ -138,7 +138,7 @@ func TestGet(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - test.TestGet(validNewPetSet()) + test.TestGet(validNewStatefulSet()) } func TestList(t *testing.T) { @@ -146,7 +146,7 @@ func TestList(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - test.TestList(validNewPetSet()) + test.TestList(validNewStatefulSet()) } func TestDelete(t *testing.T) { @@ -154,7 +154,7 @@ func TestDelete(t *testing.T) { defer server.Terminate(t) defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) - test.TestDelete(validNewPetSet()) + test.TestDelete(validNewStatefulSet()) } func TestWatch(t *testing.T) { @@ -163,7 +163,7 @@ func TestWatch(t *testing.T) { defer storage.Store.DestroyFunc() test := registrytest.New(t, storage.Store) test.TestWatch( - validNewPetSet(), + validNewStatefulSet(), // matching labels []labels.Set{ {"a": "b"}, diff --git a/pkg/registry/apps/petset/strategy.go b/pkg/registry/apps/petset/strategy.go 
index 2a87a8491f0..e76a3ac7fea 100644 --- a/pkg/registry/apps/petset/strategy.go +++ b/pkg/registry/apps/petset/strategy.go @@ -31,109 +31,109 @@ import ( "k8s.io/kubernetes/pkg/util/validation/field" ) -// petSetStrategy implements verification logic for Replication PetSets. -type petSetStrategy struct { +// statefulSetStrategy implements verification logic for StatefulSets. +type statefulSetStrategy struct { runtime.ObjectTyper api.NameGenerator } -// Strategy is the default logic that applies when creating and updating Replication PetSet objects. -var Strategy = petSetStrategy{api.Scheme, api.SimpleNameGenerator} +// Strategy is the default logic that applies when creating and updating StatefulSet objects. +var Strategy = statefulSetStrategy{api.Scheme, api.SimpleNameGenerator} -// NamespaceScoped returns true because all PetSet' need to be within a namespace. -func (petSetStrategy) NamespaceScoped() bool { +// NamespaceScoped returns true because all StatefulSets need to be within a namespace. +func (statefulSetStrategy) NamespaceScoped() bool { return true } -// PrepareForCreate clears the status of an PetSet before creation. -func (petSetStrategy) PrepareForCreate(ctx api.Context, obj runtime.Object) { - petSet := obj.(*apps.PetSet) +// PrepareForCreate clears the status of a StatefulSet before creation. +func (statefulSetStrategy) PrepareForCreate(ctx api.Context, obj runtime.Object) { + statefulSet := obj.(*apps.StatefulSet) // create cannot set status - petSet.Status = apps.PetSetStatus{} + statefulSet.Status = apps.StatefulSetStatus{} - petSet.Generation = 1 + statefulSet.Generation = 1 } // PrepareForUpdate clears fields that are not allowed to be set by end users on update. -func (petSetStrategy) PrepareForUpdate(ctx api.Context, obj, old runtime.Object) { - newPetSet := obj.(*apps.PetSet) - oldPetSet := old.(*apps.PetSet) +func (statefulSetStrategy) PrepareForUpdate(ctx api.Context, obj, old runtime.Object) { + newStatefulSet := obj.(*apps.StatefulSet) + oldStatefulSet := old.(*apps.StatefulSet) // Update is not allowed to set status - newPetSet.Status = oldPetSet.Status + newStatefulSet.Status = oldStatefulSet.Status // Any changes to the spec increment the generation number, any changes to the // status should reflect the generation number of the corresponding object. // See api.ObjectMeta description for more information on Generation. - if !reflect.DeepEqual(oldPetSet.Spec, newPetSet.Spec) { - newPetSet.Generation = oldPetSet.Generation + 1 + if !reflect.DeepEqual(oldStatefulSet.Spec, newStatefulSet.Spec) { + newStatefulSet.Generation = oldStatefulSet.Generation + 1 } } -// Validate validates a new PetSet. -func (petSetStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { - petSet := obj.(*apps.PetSet) - return validation.ValidatePetSet(petSet) +// Validate validates a new StatefulSet. +func (statefulSetStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + statefulSet := obj.(*apps.StatefulSet) + return validation.ValidateStatefulSet(statefulSet) } // Canonicalize normalizes the object after validation. -func (petSetStrategy) Canonicalize(obj runtime.Object) { +func (statefulSetStrategy) Canonicalize(obj runtime.Object) { } -// AllowCreateOnUpdate is false for PetSet; this means POST is needed to create one. -func (petSetStrategy) AllowCreateOnUpdate() bool { +// AllowCreateOnUpdate is false for StatefulSet; this means POST is needed to create one. 
+func (statefulSetStrategy) AllowCreateOnUpdate() bool { return false } // ValidateUpdate is the default update validation for an end user. -func (petSetStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { - validationErrorList := validation.ValidatePetSet(obj.(*apps.PetSet)) - updateErrorList := validation.ValidatePetSetUpdate(obj.(*apps.PetSet), old.(*apps.PetSet)) +func (statefulSetStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + validationErrorList := validation.ValidateStatefulSet(obj.(*apps.StatefulSet)) + updateErrorList := validation.ValidateStatefulSetUpdate(obj.(*apps.StatefulSet), old.(*apps.StatefulSet)) return append(validationErrorList, updateErrorList...) } -// AllowUnconditionalUpdate is the default update policy for PetSet objects. -func (petSetStrategy) AllowUnconditionalUpdate() bool { +// AllowUnconditionalUpdate is the default update policy for StatefulSet objects. +func (statefulSetStrategy) AllowUnconditionalUpdate() bool { return true } -// PetSetToSelectableFields returns a field set that represents the object. -func PetSetToSelectableFields(petSet *apps.PetSet) fields.Set { - return generic.ObjectMetaFieldsSet(&petSet.ObjectMeta, true) +// StatefulSetToSelectableFields returns a field set that represents the object. +func StatefulSetToSelectableFields(statefulSet *apps.StatefulSet) fields.Set { + return generic.ObjectMetaFieldsSet(&statefulSet.ObjectMeta, true) } -// MatchPetSet is the filter used by the generic etcd backend to watch events +// MatchStatefulSet is the filter used by the generic etcd backend to watch events // from etcd to clients of the apiserver only interested in specific labels/fields. -func MatchPetSet(label labels.Selector, field fields.Selector) storage.SelectionPredicate { +func MatchStatefulSet(label labels.Selector, field fields.Selector) storage.SelectionPredicate { return storage.SelectionPredicate{ Label: label, Field: field, GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - petSet, ok := obj.(*apps.PetSet) + statefulSet, ok := obj.(*apps.StatefulSet) if !ok { - return nil, nil, fmt.Errorf("given object is not an PetSet.") + return nil, nil, fmt.Errorf("given object is not a StatefulSet.") } - return labels.Set(petSet.ObjectMeta.Labels), PetSetToSelectableFields(petSet), nil + return labels.Set(statefulSet.ObjectMeta.Labels), StatefulSetToSelectableFields(statefulSet), nil }, } } -type petSetStatusStrategy struct { - petSetStrategy +type statefulSetStatusStrategy struct { + statefulSetStrategy } -var StatusStrategy = petSetStatusStrategy{Strategy} +var StatusStrategy = statefulSetStatusStrategy{Strategy} // PrepareForUpdate clears fields that are not allowed to be set by end users on update of status -func (petSetStatusStrategy) PrepareForUpdate(ctx api.Context, obj, old runtime.Object) { - newPetSet := obj.(*apps.PetSet) - oldPetSet := old.(*apps.PetSet) +func (statefulSetStatusStrategy) PrepareForUpdate(ctx api.Context, obj, old runtime.Object) { + newStatefulSet := obj.(*apps.StatefulSet) + oldStatefulSet := old.(*apps.StatefulSet) // status changes are not allowed to update spec - newPetSet.Spec = oldPetSet.Spec + newStatefulSet.Spec = oldStatefulSet.Spec } // ValidateUpdate is the default update validation for an end user updating status -func (petSetStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) 
field.ErrorList { // TODO: Validate status updates. - return validation.ValidatePetSetStatusUpdate(obj.(*apps.PetSet), old.(*apps.PetSet)) + return validation.ValidateStatefulSetStatusUpdate(obj.(*apps.StatefulSet), old.(*apps.StatefulSet)) } diff --git a/pkg/registry/apps/petset/strategy_test.go b/pkg/registry/apps/petset/strategy_test.go index d4fd81805ff..ee981939adf 100644 --- a/pkg/registry/apps/petset/strategy_test.go +++ b/pkg/registry/apps/petset/strategy_test.go @@ -24,13 +24,13 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" ) -func TestPetSetStrategy(t *testing.T) { +func TestStatefulSetStrategy(t *testing.T) { ctx := api.NewDefaultContext() if !Strategy.NamespaceScoped() { - t.Errorf("PetSet must be namespace scoped") + t.Errorf("StatefulSet must be namespace scoped") } if Strategy.AllowCreateOnUpdate() { - t.Errorf("PetSet should not allow create on update") + t.Errorf("StatefulSet should not allow create on update") } validSelector := map[string]string{"a": "b"} @@ -46,18 +46,18 @@ func TestPetSetStrategy(t *testing.T) { }, }, } - ps := &apps.PetSet{ + ps := &apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, }, - Status: apps.PetSetStatus{Replicas: 3}, + Status: apps.StatefulSetStatus{Replicas: 3}, } Strategy.PrepareForCreate(ctx, ps) if ps.Status.Replicas != 0 { - t.Error("PetSet should not allow setting status.pets on create") + t.Error("StatefulSet should not allow setting status.replicas on create") } errs := Strategy.Validate(ctx, ps) if len(errs) != 0 { @@ -65,35 +65,35 @@ func TestPetSetStrategy(t *testing.T) { } // Just Spec.Replicas is allowed to change - validPs := &apps.PetSet{ + validPs := &apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: ps.Name, Namespace: ps.Namespace, ResourceVersion: "1", Generation: 1}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: ps.Spec.Selector, Template: validPodTemplate.Template, }, - Status: apps.PetSetStatus{Replicas: 4}, + Status: apps.StatefulSetStatus{Replicas: 4}, } Strategy.PrepareForUpdate(ctx, validPs, ps) errs = Strategy.ValidateUpdate(ctx, validPs, ps) if len(errs) != 0 { - t.Errorf("Updating spec.Replicas is allowed on a petset: %v", errs) + t.Errorf("Updating spec.Replicas is allowed on a statefulset: %v", errs) } validPs.Spec.Selector = &unversioned.LabelSelector{MatchLabels: map[string]string{"a": "bar"}} Strategy.PrepareForUpdate(ctx, validPs, ps) errs = Strategy.ValidateUpdate(ctx, validPs, ps) if len(errs) == 0 { - t.Errorf("Expected a validation error since updates are disallowed on petsets.") + t.Errorf("Expected a validation error since updates are disallowed on statefulsets.") } } -func TestPetSetStatusStrategy(t *testing.T) { +func TestStatefulSetStatusStrategy(t *testing.T) { ctx := api.NewDefaultContext() if !StatusStrategy.NamespaceScoped() { - t.Errorf("PetSet must be namespace scoped") + t.Errorf("StatefulSet must be namespace scoped") } if StatusStrategy.AllowCreateOnUpdate() { - t.Errorf("PetSet should not allow create on update") + t.Errorf("StatefulSet should not allow create on update") } validSelector := map[string]string{"a": "b"} validPodTemplate := api.PodTemplate{ @@ -108,34 +108,34 @@ func TestPetSetStatusStrategy(t *testing.T) { }, }, } - oldPS := &apps.PetSet{ + oldPS := &apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: 
"10"}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 3, Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, }, - Status: apps.PetSetStatus{ + Status: apps.StatefulSetStatus{ Replicas: 1, }, } - newPS := &apps.PetSet{ + newPS := &apps.StatefulSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault, ResourceVersion: "9"}, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Replicas: 1, Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, Template: validPodTemplate.Template, }, - Status: apps.PetSetStatus{ + Status: apps.StatefulSetStatus{ Replicas: 2, }, } StatusStrategy.PrepareForUpdate(ctx, newPS, oldPS) if newPS.Status.Replicas != 2 { - t.Errorf("PetSet status updates should allow change of pets: %v", newPS.Status.Replicas) + t.Errorf("StatefulSet status updates should allow change of pods: %v", newPS.Status.Replicas) } if newPS.Spec.Replicas != 3 { - t.Errorf("PetSet status updates should not clobber spec: %v", newPS.Spec) + t.Errorf("StatefulSet status updates should not clobber spec: %v", newPS.Spec) } errs := StatusStrategy.ValidateUpdate(ctx, newPS, oldPS) if len(errs) != 0 { diff --git a/pkg/registry/apps/rest/storage_apps.go b/pkg/registry/apps/rest/storage_apps.go index 2b71436d9bb..c6bd946f260 100644 --- a/pkg/registry/apps/rest/storage_apps.go +++ b/pkg/registry/apps/rest/storage_apps.go @@ -21,7 +21,7 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" appsapiv1alpha1 "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" "k8s.io/kubernetes/pkg/genericapiserver" - petsetetcd "k8s.io/kubernetes/pkg/registry/apps/petset/etcd" + statefulsetetcd "k8s.io/kubernetes/pkg/registry/apps/petset/etcd" ) type RESTStorageProvider struct{} @@ -43,10 +43,10 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource genericapis version := appsapiv1alpha1.SchemeGroupVersion storage := map[string]rest.Storage{} - if apiResourceConfigSource.ResourceEnabled(version.WithResource("petsets")) { - petsetStorage, petsetStatusStorage := petsetetcd.NewREST(restOptionsGetter(apps.Resource("petsets"))) - storage["petsets"] = petsetStorage - storage["petsets/status"] = petsetStatusStorage + if apiResourceConfigSource.ResourceEnabled(version.WithResource("statefulsets")) { + statefulsetStorage, statefulsetStatusStorage := statefulsetetcd.NewREST(restOptionsGetter(apps.Resource("statefulsets"))) + storage["statefulsets"] = statefulsetStorage + storage["statefulsets/status"] = statefulsetStatusStorage } return storage } diff --git a/pkg/registry/cachesize/cachesize.go b/pkg/registry/cachesize/cachesize.go index 1685e7a5b34..9ab98b7b3d8 100644 --- a/pkg/registry/cachesize/cachesize.go +++ b/pkg/registry/cachesize/cachesize.go @@ -39,7 +39,7 @@ const ( HorizontalPodAutoscalers Resource = "horizontalpodautoscalers" Ingress Resource = "ingress" PodDisruptionBudget Resource = "poddisruptionbudgets" - PetSet Resource = "petset" + StatefulSet Resource = "statefulset" Jobs Resource = "jobs" LimitRanges Resource = "limitranges" Namespaces Resource = "namespaces" diff --git a/pkg/volume/util.go b/pkg/volume/util.go index 21d222f3bfc..a5ff196114d 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -274,7 +274,7 @@ func GetPath(mounter Mounter) (string, error) { // ChooseZone implements our heuristics for choosing a zone for volume creation based on the volume name // Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name. 
// However, if the PVCName ends with `-`, we will hash the prefix, and then add the integer to the hash. -// This means that a PetSet's volumes (`claimname-petsetname-id`) will spread across available zones, +// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones, // assuming the id values are consecutive. func ChooseZoneForVolume(zones sets.String, pvcName string) string { // We create the volume in a zone determined by the name @@ -290,8 +290,8 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { } else { hashString := pvcName - // Heuristic to make sure that volumes in a PetSet are spread across zones - // PetSet PVCs are (currently) named ClaimName-PetSetName-Id, + // Heuristic to make sure that volumes in a StatefulSet are spread across zones + // StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id, // where Id is an integer index lastDash := strings.LastIndexByte(pvcName, '-') if lastDash != -1 { @@ -302,7 +302,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { index = uint32(petID) // We still hash the volume name, but only the base hashString = pvcName[:lastDash] - glog.V(2).Infof("Detected PetSet-style volume name %q; index=%d", pvcName, index) + glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) } } @@ -314,7 +314,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { // Zones.List returns zones in a consistent order (sorted) // We do have a potential failure case where volumes will not be properly spread, - // if the set of zones changes during PetSet volume creation. However, this is + // if the set of zones changes during StatefulSet volume creation. However, this is // probably relatively unlikely because we expect the set of zones to be essentially // static for clusters. // Hopefully we can address this problem if/when we do full scheduler integration of diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 89bde5eebb4..7d27c30e1fb 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -82,7 +82,7 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), @@ -112,7 +112,7 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), @@ -135,7 +135,7 @@ func ClusterRoles() []rbac.ClusterRole { // indicator of which namespaces you have access to. 
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup).Resources("petsets").RuleOrDie(), + rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index cc73967e51a..cf34528c96d 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -37,7 +37,7 @@ const ( // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 ClusterAutoscalerProvider = "ClusterAutoscalerProvider" - PetSetKind = "PetSet" + StatefulSetKind = "StatefulSet" ) // getMaxVols checks the max PD volumes environment variable, otherwise returning a default value @@ -239,7 +239,7 @@ func GetEquivalencePod(pod *api.Pod) interface{} { func isValidControllerKind(kind string) bool { switch kind { // list of kinds that we cannot handle - case PetSetKind: + case StatefulSetKind: return false default: return true diff --git a/test/e2e/examples.go b/test/e2e/examples.go index c65ffa747a4..badb0bc0bba 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -244,8 +244,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { }) }) - framework.KubeDescribe("CassandraPetSet", func() { - It("should create petset", func() { + framework.KubeDescribe("CassandraStatefulSet", func() { + It("should create statefulset", func() { mkpath := func(file string) string { return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file) } @@ -258,9 +258,9 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { output := strings.Replace(string(input), "cassandra-0.cassandra.default.svc.cluster.local", "cassandra-0.cassandra."+ns+".svc.cluster.local", -1) - petSetYaml := "/tmp/cassandra-petset.yaml" + statefulsetYaml := "/tmp/cassandra-petset.yaml" - err = ioutil.WriteFile(petSetYaml, []byte(output), 0644) + err = ioutil.WriteFile(statefulsetYaml, []byte(output), 0644) Expect(err).NotTo(HaveOccurred()) By("Starting the cassandra service") @@ -269,21 +269,21 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { err = framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout) Expect(err).NotTo(HaveOccurred()) - // Create an PetSet with n nodes in it. Each node will then be verified. - By("Creating a Cassandra PetSet") + // Create an StatefulSet with n nodes in it. Each node will then be verified. 
+ By("Creating a Cassandra StatefulSet") - framework.RunKubectlOrDie("create", "-f", petSetYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", statefulsetYaml, nsFlag) - petsetPoll := 30 * time.Second - petsetTimeout := 10 * time.Minute + statefulsetPoll := 30 * time.Second + statefulsetTimeout := 10 * time.Minute // TODO - parse this number out of the yaml numPets := 3 label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) - err = wait.PollImmediate(petsetPoll, petsetTimeout, + err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label}) if err != nil { - return false, fmt.Errorf("Unable to get list of pods in petset %s", label) + return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label) } ExpectNoError(err) if len(podList.Items) < numPets { @@ -312,8 +312,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP) } }) - // using out of petset e2e as deleting pvc is a pain - deleteAllPetSets(c, ns) + // using out of statefulset e2e as deleting pvc is a pain + deleteAllStatefulSets(c, ns) }) }) diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go index 11205d3ff47..d4115d58f1f 100644 --- a/test/e2e/monitoring.go +++ b/test/e2e/monitoring.go @@ -110,7 +110,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, if err != nil { return nil, err } - psList, err := c.Apps().PetSets(api.NamespaceSystem).List(options) + psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options) if err != nil { return nil, err } diff --git a/test/e2e/petset.go b/test/e2e/petset.go index 1131fccd8ba..6a66eceeac8 100644 --- a/test/e2e/petset.go +++ b/test/e2e/petset.go @@ -46,15 +46,15 @@ import ( ) const ( - petsetPoll = 10 * time.Second + statefulsetPoll = 10 * time.Second // Some pets install base packages via wget - petsetTimeout = 10 * time.Minute + statefulsetTimeout = 10 * time.Minute // Timeout for pet pods to change state petPodTimeout = 5 * time.Minute zookeeperManifestPath = "test/e2e/testing-manifests/petset/zookeeper" mysqlGaleraManifestPath = "test/e2e/testing-manifests/petset/mysql-galera" redisManifestPath = "test/e2e/testing-manifests/petset/redis" - // Should the test restart petset clusters? + // Should the test restart statefulset clusters? // TODO: enable when we've productionzed bringup of pets in this e2e. restartCluster = false @@ -65,7 +65,7 @@ const ( // Time: 25m, slow by design. // GCE Quota requirements: 3 pds, one per pet manifest declared above. // GCE Api requirements: nodes and master need storage r/w permissions. -var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { +var _ = framework.KubeDescribe("StatefulSet [Slow] [Feature:PetSet]", func() { options := framework.FrameworkOptions{ GroupVersion: &unversioned.GroupVersion{Group: apps.GroupName, Version: "v1alpha1"}, } @@ -74,19 +74,19 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { var c clientset.Interface BeforeEach(func() { - // PetSet is in alpha, so it's disabled on some platforms. We skip this + // StatefulSet is in alpha, so it's disabled on some platforms. We skip this // test if a resource get fails on non-GCE platforms. // In theory, tests that restart pets should pass on any platform with a // dynamic volume provisioner. 
if !framework.ProviderIs("gce") { - framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "petsets"}, f.Namespace.Name) + framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "statefulsets"}, f.Namespace.Name) } c = f.ClientSet ns = f.Namespace.Name }) - framework.KubeDescribe("Basic PetSet functionality", func() { + framework.KubeDescribe("Basic StatefulSet functionality", func() { psName := "pet" labels := map[string]string{ "foo": "bar", @@ -105,24 +105,24 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { if CurrentGinkgoTestDescription().Failed { dumpDebugInfo(c, ns) } - framework.Logf("Deleting all petset in ns %v", ns) - deleteAllPetSets(c, ns) + framework.Logf("Deleting all statefulset in ns %v", ns) + deleteAllStatefulSets(c, ns) }) - It("should provide basic identity [Feature:PetSet]", func() { - By("creating petset " + psName + " in namespace " + ns) + It("should provide basic identity [Feature:StatefulSet]", func() { + By("creating statefulset " + psName + " in namespace " + ns) petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}} - ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) - _, err := c.Apps().PetSets(ns).Create(ps) + ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) + _, err := c.Apps().StatefulSets(ns).Create(ps) Expect(err).NotTo(HaveOccurred()) - pst := petSetTester{c: c} + pst := statefulSetTester{c: c} By("Saturating pet set " + ps.Name) pst.saturate(ps) - By("Verifying petset mounted data directory is usable") + By("Verifying statefulset mounted data directory is usable") ExpectNoError(pst.checkMount(ps, "/data")) cmd := "echo $(hostname) > /data/hostname; sync;" @@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { pst.restart(ps) pst.saturate(ps) - By("Verifying petset mounted data directory is usable") + By("Verifying statefulset mounted data directory is usable") ExpectNoError(pst.checkMount(ps, "/data")) cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi" @@ -142,15 +142,15 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { }) It("should handle healthy pet restarts during scale [Feature:PetSet]", func() { - By("creating petset " + psName + " in namespace " + ns) + By("creating statefulset " + psName + " in namespace " + ns) petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}} - ps := newPetSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels) - _, err := c.Apps().PetSets(ns).Create(ps) + ps := newStatefulSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels) + _, err := c.Apps().StatefulSets(ns).Create(ps) Expect(err).NotTo(HaveOccurred()) - pst := petSetTester{c: c} + pst := statefulSetTester{c: c} pst.waitForRunning(1, ps) @@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { By("Deleting unhealthy pet at index 1.") pst.deletePetAtIndex(1, ps) - By("Confirming all pets in petset are created.") + By("Confirming all pets in statefulset are created.") pst.saturate(ps) }) }) @@ -183,12 +183,12 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { if 
CurrentGinkgoTestDescription().Failed { dumpDebugInfo(c, ns) } - framework.Logf("Deleting all petset in ns %v", ns) - deleteAllPetSets(c, ns) + framework.Logf("Deleting all statefulset in ns %v", ns) + deleteAllStatefulSets(c, ns) }) It("should creating a working zookeeper cluster [Feature:PetSet]", func() { - pst := &petSetTester{c: c} + pst := &statefulSetTester{c: c} pet := &zookeeperTester{tester: pst} By("Deploying " + pet.name()) ps := pet.deploy(ns) @@ -209,7 +209,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { }) It("should creating a working redis cluster [Feature:PetSet]", func() { - pst := &petSetTester{c: c} + pst := &statefulSetTester{c: c} pet := &redisTester{tester: pst} By("Deploying " + pet.name()) ps := pet.deploy(ns) @@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { }) It("should creating a working mysql cluster [Feature:PetSet]", func() { - pst := &petSetTester{c: c} + pst := &statefulSetTester{c: c} pet := &mysqlGaleraTester{tester: pst} By("Deploying " + pet.name()) ps := pet.deploy(ns) @@ -263,7 +263,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( } headlessSvcName := "test" podName := "test-pod" - petSetName := "web" + statefulSetName := "web" petPodName := "web-0" BeforeEach(func() { @@ -280,11 +280,11 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( if CurrentGinkgoTestDescription().Failed { dumpDebugInfo(c, ns) } - By("Deleting all petset in ns " + ns) - deleteAllPetSets(c, ns) + By("Deleting all statefulset in ns " + ns) + deleteAllStatefulSets(c, ns) }) - It("should recreate evicted petset", func() { + It("should recreate evicted statefulset", func() { By("looking for a node to schedule pet set and pod") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) node := nodes.Items[0] @@ -309,12 +309,12 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) - By("creating petset with conflicting port in namespace " + f.Namespace.Name) - ps := newPetSet(petSetName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) + By("creating statefulset with conflicting port in namespace " + f.Namespace.Name) + ps := newStatefulSet(statefulSetName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) petContainer := &ps.Spec.Template.Spec.Containers[0] petContainer.Ports = append(petContainer.Ports, conflictingPort) ps.Spec.Template.Spec.NodeName = node.Name - _, err = f.ClientSet.Apps().PetSets(f.Namespace.Name).Create(ps) + _, err = f.ClientSet.Apps().StatefulSets(f.Namespace.Name).Create(ps) framework.ExpectNoError(err) By("waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) @@ -337,7 +337,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( } return true, nil } - framework.Logf("Observed pet pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for petset controller to delete.", + framework.Logf("Observed pet pod in namespace: %v, name: %v, uid: %v, status phase: %v. 
Waiting for statefulset controller to delete.", pod.Namespace, pod.Name, pod.UID, pod.Status.Phase) initialPetPodUID = pod.UID return false, nil @@ -391,23 +391,23 @@ func kubectlExecWithRetries(args ...string) (out string) { } type petTester interface { - deploy(ns string) *apps.PetSet + deploy(ns string) *apps.StatefulSet write(petIndex int, kv map[string]string) read(petIndex int, key string) string name() string } type zookeeperTester struct { - ps *apps.PetSet - tester *petSetTester + ps *apps.StatefulSet + tester *statefulSetTester } func (z *zookeeperTester) name() string { return "zookeeper" } -func (z *zookeeperTester) deploy(ns string) *apps.PetSet { - z.ps = z.tester.createPetSet(zookeeperManifestPath, ns) +func (z *zookeeperTester) deploy(ns string) *apps.StatefulSet { + z.ps = z.tester.createStatefulSet(zookeeperManifestPath, ns) return z.ps } @@ -428,8 +428,8 @@ func (z *zookeeperTester) read(petIndex int, key string) string { } type mysqlGaleraTester struct { - ps *apps.PetSet - tester *petSetTester + ps *apps.StatefulSet + tester *statefulSetTester } func (m *mysqlGaleraTester) name() string { @@ -444,13 +444,13 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string { return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) } -func (m *mysqlGaleraTester) deploy(ns string) *apps.PetSet { - m.ps = m.tester.createPetSet(mysqlGaleraManifestPath, ns) +func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet { + m.ps = m.tester.createStatefulSet(mysqlGaleraManifestPath, ns) - framework.Logf("Deployed petset %v, initializing database", m.ps.Name) + framework.Logf("Deployed statefulset %v, initializing database", m.ps.Name) for _, cmd := range []string{ - "create database petset;", - "use petset; create table pet (k varchar(20), v varchar(20));", + "create database statefulset;", + "use statefulset; create table pet (k varchar(20), v varchar(20));", } { framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ps.Name))) } @@ -460,19 +460,19 @@ func (m *mysqlGaleraTester) deploy(ns string) *apps.PetSet { func (m *mysqlGaleraTester) write(petIndex int, kv map[string]string) { name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex) for k, v := range kv { - cmd := fmt.Sprintf("use petset; insert into pet (k, v) values (\"%v\", \"%v\");", k, v) + cmd := fmt.Sprintf("use statefulset; insert into pet (k, v) values (\"%v\", \"%v\");", k, v) framework.Logf(m.mysqlExec(cmd, m.ps.Namespace, name)) } } func (m *mysqlGaleraTester) read(petIndex int, key string) string { name := fmt.Sprintf("%v-%d", m.ps.Name, petIndex) - return lastLine(m.mysqlExec(fmt.Sprintf("use petset; select v from pet where k=\"%v\";", key), m.ps.Namespace, name)) + return lastLine(m.mysqlExec(fmt.Sprintf("use statefulset; select v from pet where k=\"%v\";", key), m.ps.Namespace, name)) } type redisTester struct { - ps *apps.PetSet - tester *petSetTester + ps *apps.StatefulSet + tester *statefulSetTester } func (m *redisTester) name() string { @@ -484,8 +484,8 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string { return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) } -func (m *redisTester) deploy(ns string) *apps.PetSet { - m.ps = m.tester.createPetSet(redisManifestPath, ns) +func (m *redisTester) deploy(ns string) *apps.StatefulSet { + m.ps = m.tester.createStatefulSet(redisManifestPath, ns) return m.ps } @@ -506,9 +506,9 @@ func lastLine(out string) string { return 
outLines[len(outLines)-1] } -func petSetFromManifest(fileName, ns string) *apps.PetSet { - var ps apps.PetSet - framework.Logf("Parsing petset from %v", fileName) +func statefulSetFromManifest(fileName, ns string) *apps.StatefulSet { + var ps apps.StatefulSet + framework.Logf("Parsing statefulset from %v", fileName) data, err := ioutil.ReadFile(fileName) Expect(err).NotTo(HaveOccurred()) json, err := utilyaml.ToJSON(data) @@ -524,27 +524,27 @@ func petSetFromManifest(fileName, ns string) *apps.PetSet { return &ps } -// petSetTester has all methods required to test a single petset. -type petSetTester struct { +// statefulSetTester has all methods required to test a single statefulset. +type statefulSetTester struct { c clientset.Interface } -func (p *petSetTester) createPetSet(manifestPath, ns string) *apps.PetSet { +func (p *statefulSetTester) createStatefulSet(manifestPath, ns string) *apps.StatefulSet { mkpath := func(file string) string { return filepath.Join(framework.TestContext.RepoRoot, manifestPath, file) } - ps := petSetFromManifest(mkpath("petset.yaml"), ns) + ps := statefulSetFromManifest(mkpath("petset.yaml"), ns) framework.Logf(fmt.Sprintf("creating " + ps.Name + " service")) framework.RunKubectlOrDie("create", "-f", mkpath("service.yaml"), fmt.Sprintf("--namespace=%v", ns)) - framework.Logf(fmt.Sprintf("creating petset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, ps.Spec.Replicas, ps.Spec.Selector)) + framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, ps.Spec.Replicas, ps.Spec.Selector)) framework.RunKubectlOrDie("create", "-f", mkpath("petset.yaml"), fmt.Sprintf("--namespace=%v", ns)) p.waitForRunning(ps.Spec.Replicas, ps) return ps } -func (p *petSetTester) checkMount(ps *apps.PetSet, mountPath string) error { +func (p *statefulSetTester) checkMount(ps *apps.StatefulSet, mountPath string) error { for _, cmd := range []string{ // Print inode, size etc fmt.Sprintf("ls -idlh %v", mountPath), @@ -560,7 +560,7 @@ func (p *petSetTester) checkMount(ps *apps.PetSet, mountPath string) error { return nil } -func (p *petSetTester) execInPets(ps *apps.PetSet, cmd string) error { +func (p *statefulSetTester) execInPets(ps *apps.StatefulSet, cmd string) error { podList := p.getPodList(ps) for _, pet := range podList.Items { stdout, err := framework.RunHostCmd(pet.Namespace, pet.Name, cmd) @@ -572,7 +572,7 @@ func (p *petSetTester) execInPets(ps *apps.PetSet, cmd string) error { return nil } -func (p *petSetTester) saturate(ps *apps.PetSet) { +func (p *statefulSetTester) saturate(ps *apps.StatefulSet) { // TODO: Watch events and check that creation timestamps don't overlap var i int32 for i = 0; i < ps.Spec.Replicas; i++ { @@ -583,23 +583,23 @@ func (p *petSetTester) saturate(ps *apps.PetSet) { } } -func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) { +func (p *statefulSetTester) deletePetAtIndex(index int, ps *apps.StatefulSet) { // TODO: we won't use "-index" as the name strategy forever, // pull the name out from an identity mapper. 
name := fmt.Sprintf("%v-%v", ps.Name, index) noGrace := int64(0) if err := p.c.Core().Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { - framework.Failf("Failed to delete pet %v for PetSet %v: %v", name, ps.Name, ps.Namespace, err) + framework.Failf("Failed to delete pet %v for StatefulSet %v: %v", name, ps.Name, ps.Namespace, err) } } -func (p *petSetTester) scale(ps *apps.PetSet, count int32) error { +func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error { name := ps.Name ns := ps.Namespace - p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = count }) + p.update(ns, name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count }) var petList *api.PodList - pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { + pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { petList = p.getPodList(ps) if int32(len(petList.Items)) == count { return true, nil @@ -614,36 +614,36 @@ func (p *petSetTester) scale(ps *apps.PetSet, count int32) error { unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness)) } } - return fmt.Errorf("Failed to scale petset to %d in %v. Remaining pods:\n%v", count, petsetTimeout, unhealthy) + return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, statefulsetTimeout, unhealthy) } return nil } -func (p *petSetTester) restart(ps *apps.PetSet) { +func (p *statefulSetTester) restart(ps *apps.StatefulSet) { oldReplicas := ps.Spec.Replicas ExpectNoError(p.scale(ps, 0)) - p.update(ps.Namespace, ps.Name, func(ps *apps.PetSet) { ps.Spec.Replicas = oldReplicas }) + p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = oldReplicas }) } -func (p *petSetTester) update(ns, name string, update func(ps *apps.PetSet)) { +func (p *statefulSetTester) update(ns, name string, update func(ps *apps.StatefulSet)) { for i := 0; i < 3; i++ { - ps, err := p.c.Apps().PetSets(ns).Get(name) + ps, err := p.c.Apps().StatefulSets(ns).Get(name) if err != nil { - framework.Failf("failed to get petset %q: %v", name, err) + framework.Failf("failed to get statefulset %q: %v", name, err) } update(ps) - ps, err = p.c.Apps().PetSets(ns).Update(ps) + ps, err = p.c.Apps().StatefulSets(ns).Update(ps) if err == nil { return } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { - framework.Failf("failed to update petset %q: %v", name, err) + framework.Failf("failed to update statefulset %q: %v", name, err) } } - framework.Failf("too many retries draining petset %q", name) + framework.Failf("too many retries draining statefulset %q", name) } -func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList { +func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *api.PodList { selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) ExpectNoError(err) podList, err := p.c.Core().Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector}) @@ -651,22 +651,22 @@ func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList { return podList } -func (p *petSetTester) confirmPetCount(count int, ps *apps.PetSet, timeout time.Duration) { +func (p *statefulSetTester) confirmPetCount(count int, ps *apps.StatefulSet, timeout time.Duration) { start := time.Now() deadline := start.Add(timeout) for t := time.Now(); t.Before(deadline); t = time.Now() { podList := p.getPodList(ps) petCount := len(podList.Items) if petCount != count { - 
framework.Failf("PetSet %v scaled unexpectedly scaled to %d -> %d replicas: %+v", ps.Name, count, len(podList.Items), podList) + framework.Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas: %+v", ps.Name, count, len(podList.Items), podList) } - framework.Logf("Verifying petset %v doesn't scale past %d for another %+v", ps.Name, count, deadline.Sub(t)) + framework.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ps.Name, count, deadline.Sub(t)) time.Sleep(1 * time.Second) } } -func (p *petSetTester) waitForRunning(numPets int32, ps *apps.PetSet) { - pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, +func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet) { + pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { podList := p.getPodList(ps) if int32(len(podList.Items)) < numPets { @@ -692,7 +692,7 @@ func (p *petSetTester) waitForRunning(numPets int32, ps *apps.PetSet) { p.waitForStatus(ps, numPets) } -func (p *petSetTester) setHealthy(ps *apps.PetSet) { +func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) { podList := p.getPodList(ps) markedHealthyPod := "" for _, pod := range podList.Items { @@ -706,21 +706,21 @@ func (p *petSetTester) setHealthy(ps *apps.PetSet) { framework.Failf("Found multiple non-healthy pets: %v and %v", pod.Name, markedHealthyPod) } p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *api.Pod) { - up.Annotations[petset.PetSetInitAnnotation] = "true" + up.Annotations[petset.StatefulSetInitAnnotation] = "true" }) ExpectNoError(err) - framework.Logf("Set annotation %v to %v on pod %v", petset.PetSetInitAnnotation, p.Annotations[petset.PetSetInitAnnotation], pod.Name) + framework.Logf("Set annotation %v to %v on pod %v", petset.StatefulSetInitAnnotation, p.Annotations[petset.StatefulSetInitAnnotation], pod.Name) markedHealthyPod = pod.Name } } -func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) { - framework.Logf("Waiting for petset status.replicas updated to %d", expectedReplicas) +func (p *statefulSetTester) waitForStatus(ps *apps.StatefulSet, expectedReplicas int32) { + framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas) ns, name := ps.Namespace, ps.Name - pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, + pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - psGet, err := p.c.Apps().PetSets(ns).Get(name) + psGet, err := p.c.Apps().StatefulSets(ns).Get(name) if err != nil { return false, err } @@ -735,30 +735,30 @@ func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) { } } -func deleteAllPetSets(c clientset.Interface, ns string) { - pst := &petSetTester{c: c} - psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) +func deleteAllStatefulSets(c clientset.Interface, ns string) { + pst := &statefulSetTester{c: c} + psList, err := c.Apps().StatefulSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) ExpectNoError(err) - // Scale down each petset, then delete it completely. + // Scale down each statefulset, then delete it completely. // Deleting a pvc without doing this will leak volumes, #25101. 
errList := []string{} for _, ps := range psList.Items { - framework.Logf("Scaling petset %v to 0", ps.Name) + framework.Logf("Scaling statefulset %v to 0", ps.Name) if err := pst.scale(&ps, 0); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } pst.waitForStatus(&ps, 0) - framework.Logf("Deleting petset %v", ps.Name) - if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil { + framework.Logf("Deleting statefulset %v", ps.Name) + if err := c.Apps().StatefulSets(ps.Namespace).Delete(ps.Name, nil); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } } - // pvs are global, so we need to wait for the exact ones bound to the petset pvcs. + // pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs. pvNames := sets.NewString() - // TODO: Don't assume all pvcs in the ns belong to a petset - pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { + // TODO: Don't assume all pvcs in the ns belong to a statefulset + pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) @@ -778,7 +778,7 @@ func deleteAllPetSets(c clientset.Interface, ns string) { errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion.")) } - pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { + pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) @@ -793,7 +793,7 @@ func deleteAllPetSets(c clientset.Interface, ns string) { if len(waitingFor) == 0 { return true, nil } - framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n")) + framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n")) return false, nil }) if pollErr != nil { @@ -826,13 +826,13 @@ func pollReadWithTimeout(pet petTester, petNumber int, key, expectedVal string) } func isInitialized(pod api.Pod) bool { - initialized, ok := pod.Annotations[petset.PetSetInitAnnotation] + initialized, ok := pod.Annotations[petset.StatefulSetInitAnnotation] if !ok { return false } inited, err := strconv.ParseBool(initialized) if err != nil { - framework.Failf("Couldn't parse petset init annotations %v", initialized) + framework.Failf("Couldn't parse statefulset init annotations %v", initialized) } return inited } @@ -862,7 +862,7 @@ func newPVC(name string) api.PersistentVolumeClaim { } } -func newPetSet(name, ns, governingSvcName string, replicas int32, petMounts []api.VolumeMount, podMounts []api.VolumeMount, labels map[string]string) *apps.PetSet { +func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts []api.VolumeMount, podMounts []api.VolumeMount, labels map[string]string) *apps.StatefulSet { mounts := append(petMounts, podMounts...) 
claims := []api.PersistentVolumeClaim{} for _, m := range petMounts { @@ -881,16 +881,16 @@ func newPetSet(name, ns, governingSvcName string, replicas int32, petMounts []ap }) } - return &apps.PetSet{ + return &apps.StatefulSet{ TypeMeta: unversioned.TypeMeta{ - Kind: "PetSet", + Kind: "StatefulSet", APIVersion: "apps/v1beta1", }, ObjectMeta: api.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: apps.PetSetSpec{ + Spec: apps.StatefulSetSpec{ Selector: &unversioned.LabelSelector{ MatchLabels: labels, }, diff --git a/test/e2e/testing-manifests/petset/mysql-galera/petset.yaml b/test/e2e/testing-manifests/petset/mysql-galera/petset.yaml index 6da58d6aa05..378fcd60b45 100644 --- a/test/e2e/testing-manifests/petset/mysql-galera/petset.yaml +++ b/test/e2e/testing-manifests/petset/mysql-galera/petset.yaml @@ -1,5 +1,5 @@ apiVersion: apps/v1alpha1 -kind: PetSet +kind: StatefulSet metadata: name: mysql spec: diff --git a/test/e2e/testing-manifests/petset/redis/petset.yaml b/test/e2e/testing-manifests/petset/redis/petset.yaml index 08f68e344bf..27a6b845b2a 100644 --- a/test/e2e/testing-manifests/petset/redis/petset.yaml +++ b/test/e2e/testing-manifests/petset/redis/petset.yaml @@ -1,5 +1,5 @@ apiVersion: apps/v1alpha1 -kind: PetSet +kind: StatefulSet metadata: name: rd spec: diff --git a/test/e2e/testing-manifests/petset/zookeeper/petset.yaml b/test/e2e/testing-manifests/petset/zookeeper/petset.yaml index 2d4e2ee255d..c3f47435a8f 100644 --- a/test/e2e/testing-manifests/petset/zookeeper/petset.yaml +++ b/test/e2e/testing-manifests/petset/zookeeper/petset.yaml @@ -1,5 +1,5 @@ apiVersion: apps/v1alpha1 -kind: PetSet +kind: StatefulSet metadata: name: zoo spec:
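
For illustration only (this is not part of the patch): a minimal, self-contained sketch of the scheduler-side effect of the rename, mirroring the isValidControllerKind hunk in plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go earlier in this diff. The only constant that changes there is the controller kind string, "PetSet" becoming "StatefulSet"; the RBAC and API resource plural changes from "petsets" to "statefulsets" in the same mechanical way.

package main

import "fmt"

// Renamed constant, as in defaults.go above (was: PetSetKind = "PetSet").
const StatefulSetKind = "StatefulSet"

// isValidControllerKind reports whether the scheduler's equivalence-pod logic
// can handle a pod owned by a controller of the given kind; StatefulSet-owned
// pods are excluded, exactly as PetSet-owned pods were before the rename.
func isValidControllerKind(kind string) bool {
	switch kind {
	// list of kinds that we cannot handle
	case StatefulSetKind:
		return false
	default:
		return true
	}
}

func main() {
	fmt.Println(isValidControllerKind("StatefulSet"))           // false
	fmt.Println(isValidControllerKind("ReplicationController")) // true
}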