mirror of https://github.com/k3s-io/kubernetes.git
This commit converts the StatefulSet (petset) controller from the internal API types to the versioned v1 and apps/v1beta1 types; it also touches cmd/kube-controller-manager.
@@ -22,11 +22,11 @@ import (
 
 	inf "gopkg.in/inf.v0"
 
-	"k8s.io/kubernetes/pkg/api"
-	api_pod "k8s.io/kubernetes/pkg/api/pod"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -36,34 +36,34 @@ func dec(i int64, exponent int) *inf.Dec {
 	return inf.NewDec(i, inf.Scale(-exponent))
 }
 
-func newPVC(name string) api.PersistentVolumeClaim {
-	return api.PersistentVolumeClaim{
-		ObjectMeta: api.ObjectMeta{
+func newPVC(name string) v1.PersistentVolumeClaim {
+	return v1.PersistentVolumeClaim{
+		ObjectMeta: v1.ObjectMeta{
 			Name: name,
 		},
-		Spec: api.PersistentVolumeClaimSpec{
-			Resources: api.ResourceRequirements{
-				Requests: api.ResourceList{
-					api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
+		Spec: v1.PersistentVolumeClaimSpec{
+			Resources: v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
 				},
 			},
 		},
 	}
 }
 
-func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.StatefulSet {
+func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount) *apps.StatefulSet {
 	mounts := append(petMounts, podMounts...)
-	claims := []api.PersistentVolumeClaim{}
+	claims := []v1.PersistentVolumeClaim{}
 	for _, m := range petMounts {
 		claims = append(claims, newPVC(m.Name))
 	}
 
-	vols := []api.Volume{}
+	vols := []v1.Volume{}
 	for _, m := range podMounts {
-		vols = append(vols, api.Volume{
+		vols = append(vols, v1.Volume{
 			Name: m.Name,
-			VolumeSource: api.VolumeSource{
-				HostPath: &api.HostPathVolumeSource{
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{
 					Path: fmt.Sprintf("/tmp/%v", m.Name),
 				},
 			},
@@ -75,19 +75,19 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
 			Kind: "StatefulSet",
 			APIVersion: "apps/v1beta1",
 		},
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: name,
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 			UID: types.UID("test"),
 		},
 		Spec: apps.StatefulSetSpec{
 			Selector: &unversioned.LabelSelector{
 				MatchLabels: map[string]string{"foo": "bar"},
 			},
-			Replicas: int32(replicas),
-			Template: api.PodTemplateSpec{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
+			Replicas: func() *int32 { i := int32(replicas); return &i }(),
+			Template: v1.PodTemplateSpec{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
 						{
 							Name: "nginx",
 							Image: "nginx",
@@ -103,16 +103,16 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
 	}
 }
 
-func runningPod(ns, name string) *api.Pod {
-	p := &api.Pod{Status: api.PodStatus{Phase: api.PodRunning}}
+func runningPod(ns, name string) *v1.Pod {
+	p := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
 	p.Namespace = ns
 	p.Name = name
 	return p
 }
 
-func newPodList(ps *apps.StatefulSet, num int) []*api.Pod {
+func newPodList(ps *apps.StatefulSet, num int) []*v1.Pod {
 	// knownPods are pods in the system
-	knownPods := []*api.Pod{}
+	knownPods := []*v1.Pod{}
 	for i := 0; i < num; i++ {
 		k, _ := newPCB(fmt.Sprintf("%v", i), ps)
 		knownPods = append(knownPods, k.pod)
@@ -121,16 +121,16 @@ func newPodList(ps *apps.StatefulSet, num int) []*api.Pod {
 }
 
 func newStatefulSet(replicas int) *apps.StatefulSet {
-	petMounts := []api.VolumeMount{
+	petMounts := []v1.VolumeMount{
 		{Name: "datadir", MountPath: "/tmp/zookeeper"},
 	}
-	podMounts := []api.VolumeMount{
+	podMounts := []v1.VolumeMount{
 		{Name: "home", MountPath: "/home"},
 	}
 	return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts)
 }
 
-func checkPodForMount(pod *api.Pod, mountName string) error {
+func checkPodForMount(pod *v1.Pod, mountName string) error {
 	for _, c := range pod.Spec.Containers {
 		for _, v := range c.VolumeMounts {
 			if v.Name == mountName {
@@ -144,7 +144,7 @@ func checkPodForMount(pod *api.Pod, mountName string) error {
 func newFakePetClient() *fakePetClient {
 	return &fakePetClient{
 		pets: []*pcb{},
-		claims: []api.PersistentVolumeClaim{},
+		claims: []v1.PersistentVolumeClaim{},
 		recorder: &record.FakeRecorder{},
 		petHealthChecker: &defaultPetHealthChecker{},
 	}
@@ -152,7 +152,7 @@ func newFakePetClient() *fakePetClient {
 
 type fakePetClient struct {
 	pets []*pcb
-	claims []api.PersistentVolumeClaim
+	claims []v1.PersistentVolumeClaim
 	petsCreated int
 	petsDeleted int
 	claimsCreated int
@@ -168,7 +168,7 @@ func (f *fakePetClient) Delete(p *pcb) error {
 	for i, pet := range f.pets {
 		if p.pod.Name == pet.pod.Name {
 			found = true
-			f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
+			f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
 			continue
 		}
 		pets = append(pets, f.pets[i])
@@ -199,7 +199,7 @@ func (f *fakePetClient) Create(p *pcb) error {
 			return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name)
 		}
 	}
-	f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
+	f.recorder.Eventf(p.parent, v1.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
 	f.pets = append(f.pets, p)
 	f.petsCreated++
 	return nil
@@ -226,8 +226,8 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error {
 	return nil
 }
 
-func (f *fakePetClient) getPodList() []*api.Pod {
-	p := []*api.Pod{}
+func (f *fakePetClient) getPodList() []*v1.Pod {
+	p := []*v1.Pod{}
 	for i, pet := range f.pets {
 		if pet.pod == nil {
 			continue
@@ -251,10 +251,10 @@ func (f *fakePetClient) setHealthy(index int) error {
 	if len(f.pets) <= index {
 		return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
 	}
-	f.pets[index].pod.Status.Phase = api.PodRunning
+	f.pets[index].pod.Status.Phase = v1.PodRunning
 	f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true"
-	f.pets[index].pod.Status.Conditions = []api.PodCondition{
-		{Type: api.PodReady, Status: api.ConditionTrue},
+	f.pets[index].pod.Status.Conditions = []v1.PodCondition{
+		{Type: v1.PodReady, Status: v1.ConditionTrue},
 	}
 	return nil
 }
@@ -262,7 +262,7 @@ func (f *fakePetClient) setHealthy(index int) error {
 // isHealthy is a convenience wrapper around the default health checker.
 // The first invocation returns not-healthy, but marks the pet healthy so
 // subsequent invocations see it as healthy.
-func (f *fakePetClient) isHealthy(pod *api.Pod) bool {
+func (f *fakePetClient) isHealthy(pod *v1.Pod) bool {
 	if f.petHealthChecker.isHealthy(pod) {
 		return true
 	}
@@ -280,11 +280,11 @@ func (f *fakePetClient) setDeletionTimestamp(index int) error {
 // SyncPVCs fakes pvc syncing.
 func (f *fakePetClient) SyncPVCs(pet *pcb) error {
 	v := pet.pvcs
-	updateClaims := map[string]api.PersistentVolumeClaim{}
+	updateClaims := map[string]v1.PersistentVolumeClaim{}
 	for i, update := range v {
 		updateClaims[update.Name] = v[i]
 	}
-	claimList := []api.PersistentVolumeClaim{}
+	claimList := []v1.PersistentVolumeClaim{}
 	for i, existing := range f.claims {
 		if update, ok := updateClaims[existing.Name]; ok {
 			claimList = append(claimList, update)
@@ -296,7 +296,7 @@ func (f *fakePetClient) SyncPVCs(pet *pcb) error {
 	for _, remaining := range updateClaims {
 		claimList = append(claimList, remaining)
 		f.claimsCreated++
-		f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
+		f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
 	}
 	f.claims = claimList
 	return nil
@@ -309,12 +309,12 @@ func (f *fakePetClient) DeletePVCs(pet *pcb) error {
 	for _, c := range claimsToDelete {
 		deleteClaimNames.Insert(c.Name)
 	}
-	pvcs := []api.PersistentVolumeClaim{}
+	pvcs := []v1.PersistentVolumeClaim{}
 	for i, existing := range f.claims {
 		if deleteClaimNames.Has(existing.Name) {
 			deleteClaimNames.Delete(existing.Name)
 			f.claimsDeleted++
-			f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
+			f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
 			continue
 		}
 		pvcs = append(pvcs, f.claims[i])
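Note on the Replicas change above: apps/v1beta1 makes StatefulSetSpec.Replicas a *int32 rather than an int32, which is why the fixture now builds the field with an inline closure. A minimal sketch of the same pointer-taking pattern (the int32Ptr helper name is illustrative and not part of this commit):

package main

import "fmt"

// int32Ptr is an illustrative helper equivalent to the inline closure used
// in the fixture above: func() *int32 { i := int32(replicas); return &i }().
func int32Ptr(i int32) *int32 { return &i }

func main() {
	replicas := 3
	// apps/v1beta1 StatefulSetSpec.Replicas is *int32, so a literal cannot be
	// assigned directly; take the address of a local copy instead.
	specReplicas := int32Ptr(int32(replicas))
	fmt.Println(*specReplicas) // reads back 3, mirroring *(ps.Spec.Replicas)
}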
@@ -23,9 +23,9 @@ import (
 	"strings"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
-	podapi "k8s.io/kubernetes/pkg/api/pod"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	podapi "k8s.io/kubernetes/pkg/api/v1/pod"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
@@ -41,10 +41,10 @@ type identityMapper interface {
 	// SetIdentity takes an id and assigns the given pet an identity based
 	// on the stateful set spec. The is must be unique amongst members of the
 	// stateful set.
-	SetIdentity(id string, pet *api.Pod)
+	SetIdentity(id string, pet *v1.Pod)
 
 	// Identity returns the identity of the pet.
-	Identity(pod *api.Pod) string
+	Identity(pod *v1.Pod) string
 }
 
 func newIdentityMappers(ps *apps.StatefulSet) []identityMapper {
@@ -61,19 +61,19 @@ type NetworkIdentityMapper struct {
 }
 
 // SetIdentity sets network identity on the pet.
-func (n *NetworkIdentityMapper) SetIdentity(id string, pet *api.Pod) {
+func (n *NetworkIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
 	pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id)
 	pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName
 	return
 }
 
 // Identity returns the network identity of the pet.
-func (n *NetworkIdentityMapper) Identity(pet *api.Pod) string {
+func (n *NetworkIdentityMapper) Identity(pet *v1.Pod) string {
 	return n.String(pet)
 }
 
 // String is a string function for the network identity of the pet.
-func (n *NetworkIdentityMapper) String(pet *api.Pod) string {
+func (n *NetworkIdentityMapper) String(pet *v1.Pod) string {
 	hostname := pet.Annotations[podapi.PodHostnameAnnotation]
 	subdomain := pet.Annotations[podapi.PodSubdomainAnnotation]
 	return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".")
@@ -85,13 +85,13 @@ type VolumeIdentityMapper struct {
 }
 
 // SetIdentity sets storge identity on the pet.
-func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
-	petVolumes := []api.Volume{}
+func (v *VolumeIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
+	petVolumes := []v1.Volume{}
 	petClaims := v.GetClaims(id)
 
 	// These volumes will all go down with the pod. If a name matches one of
 	// the claims in the stateful set, it gets clobbered.
-	podVolumes := map[string]api.Volume{}
+	podVolumes := map[string]v1.Volume{}
 	for _, podVol := range pet.Spec.Volumes {
 		podVolumes[podVol.Name] = podVol
 	}
@@ -105,10 +105,10 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
 			// TODO: Validate and reject this.
 			glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name)
 		}
-		newVol := api.Volume{
+		newVol := v1.Volume{
 			Name: name,
-			VolumeSource: api.VolumeSource{
-				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
+			VolumeSource: v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
 					ClaimName: claim.Name,
 					// TODO: Use source definition to set this value when we have one.
 					ReadOnly: false,
@@ -129,13 +129,13 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
 }
 
 // Identity returns the storage identity of the pet.
-func (v *VolumeIdentityMapper) Identity(pet *api.Pod) string {
+func (v *VolumeIdentityMapper) Identity(pet *v1.Pod) string {
 	// TODO: Make this a hash?
 	return v.String(pet)
 }
 
 // String is a string function for the network identity of the pet.
-func (v *VolumeIdentityMapper) String(pet *api.Pod) string {
+func (v *VolumeIdentityMapper) String(pet *v1.Pod) string {
 	ids := []string{}
 	petVols := sets.NewString()
 	for _, petVol := range v.ps.Spec.VolumeClaimTemplates {
@@ -160,8 +160,8 @@ func (v *VolumeIdentityMapper) String(pet *api.Pod) string {
 
 // GetClaims returns the volume claims associated with the given id.
 // The claims belong to the statefulset. The id should be unique within a statefulset.
-func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVolumeClaim {
-	petClaims := map[string]api.PersistentVolumeClaim{}
+func (v *VolumeIdentityMapper) GetClaims(id string) map[string]v1.PersistentVolumeClaim {
+	petClaims := map[string]v1.PersistentVolumeClaim{}
 	for _, pvc := range v.ps.Spec.VolumeClaimTemplates {
 		claim := pvc
 		// TODO: Name length checking in validation.
@@ -177,12 +177,12 @@ func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVol
 }
 
 // GetClaimsForPet returns the pvcs for the given pet.
-func (v *VolumeIdentityMapper) GetClaimsForPet(pet *api.Pod) []api.PersistentVolumeClaim {
+func (v *VolumeIdentityMapper) GetClaimsForPet(pet *v1.Pod) []v1.PersistentVolumeClaim {
 	// Strip out the "-(index)" from the pet name and use it to generate
 	// claim names.
 	id := strings.Split(pet.Name, "-")
 	petID := id[len(id)-1]
-	pvcs := []api.PersistentVolumeClaim{}
+	pvcs := []v1.PersistentVolumeClaim{}
 	for _, pvc := range v.GetClaims(petID) {
 		pvcs = append(pvcs, pvc)
 	}
@@ -196,25 +196,25 @@ type NameIdentityMapper struct {
 }
 
 // SetIdentity sets the pet namespace and name.
-func (n *NameIdentityMapper) SetIdentity(id string, pet *api.Pod) {
+func (n *NameIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
 	pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
 	pet.Namespace = n.ps.Namespace
 	return
 }
 
 // Identity returns the name identity of the pet.
-func (n *NameIdentityMapper) Identity(pet *api.Pod) string {
+func (n *NameIdentityMapper) Identity(pet *v1.Pod) string {
 	return n.String(pet)
 }
 
 // String is a string function for the name identity of the pet.
-func (n *NameIdentityMapper) String(pet *api.Pod) string {
+func (n *NameIdentityMapper) String(pet *v1.Pod) string {
 	return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name)
 }
 
 // identityHash computes a hash of the pet by running all the above identity
 // mappers.
-func identityHash(ps *apps.StatefulSet, pet *api.Pod) string {
+func identityHash(ps *apps.StatefulSet, pet *v1.Pod) string {
 	id := ""
 	for _, idMapper := range newIdentityMappers(ps) {
 		id += idMapper.Identity(pet)
@@ -226,7 +226,7 @@ func identityHash(ps *apps.StatefulSet, pet *api.Pod) string {
 // Note that this is *not* a literal copy, but a copy of the fields that
 // contribute to the pet's identity. The returned boolean 'needsUpdate' will
 // be false if the realPet already has the same identity as the expectedPet.
-func copyPetID(realPet, expectedPet *pcb) (pod api.Pod, needsUpdate bool, err error) {
+func copyPetID(realPet, expectedPet *pcb) (pod v1.Pod, needsUpdate bool, err error) {
 	if realPet.pod == nil || expectedPet.pod == nil {
 		return pod, false, fmt.Errorf("Need a valid to and from pet for copy")
 	}
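The identity mappers above write a pet's network identity into pod annotations keyed by podapi.PodHostnameAnnotation and podapi.PodSubdomainAnnotation, and the Identity/String methods read the same keys back. A minimal sketch of that round trip, assuming the 1.5-era import paths shown in the hunks; the literal values are illustrative only:

package main

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	podapi "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	// The annotation map must be non-nil before a SetIdentity-style write.
	pet := &v1.Pod{}
	pet.Annotations = map[string]string{}
	pet.Namespace = "default"
	pet.Annotations[podapi.PodHostnameAnnotation] = "web-0"  // <set name>-<id>
	pet.Annotations[podapi.PodSubdomainAnnotation] = "nginx" // governing service name

	// Reading the identity back, as NetworkIdentityMapper.String does above.
	id := strings.Join([]string{
		pet.Annotations[podapi.PodHostnameAnnotation],
		pet.Annotations[podapi.PodSubdomainAnnotation],
		pet.Namespace,
	}, ".")
	fmt.Println(id) // web-0.nginx.default
}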
@@ -23,8 +23,8 @@ import (
 
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
-	api_pod "k8s.io/kubernetes/pkg/api/pod"
+	"k8s.io/kubernetes/pkg/api/v1"
+	api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
 )
 
 func TestPetIDName(t *testing.T) {
@@ -150,10 +150,10 @@ func TestPetIDReset(t *testing.T) {
 	if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) {
 		t.Fatalf("Failed to generate uniquey identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec)
 	}
-	userAdded := api.Volume{
+	userAdded := v1.Volume{
 		Name: "test",
-		VolumeSource: api.VolumeSource{
-			EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
+		VolumeSource: v1.VolumeSource{
+			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
 		},
 	}
 	firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded)
@@ -21,8 +21,8 @@ import (
 	"sort"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
 )
 
@@ -35,7 +35,7 @@ func newPCB(id string, ps *apps.StatefulSet) (*pcb, error) {
 	for _, im := range newIdentityMappers(ps) {
 		im.SetIdentity(id, petPod)
 	}
-	petPVCs := []api.PersistentVolumeClaim{}
+	petPVCs := []v1.PersistentVolumeClaim{}
 	vMapper := &VolumeIdentityMapper{ps}
 	for _, c := range vMapper.GetClaims(id) {
 		petPVCs = append(petPVCs, c)
@@ -87,7 +87,7 @@ func (pt *petQueue) empty() bool {
 }
 
 // NewPetQueue returns a queue for tracking pets
-func NewPetQueue(ps *apps.StatefulSet, podList []*api.Pod) *petQueue {
+func NewPetQueue(ps *apps.StatefulSet, podList []*v1.Pod) *petQueue {
 	pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}}
 	// Seed the queue with existing pets. Assume all pets are scheduled for
 	// deletion, enqueuing a pet will "undelete" it. We always want to delete
@@ -118,7 +118,7 @@ type statefulSetIterator struct {
 func (pi *statefulSetIterator) Next() bool {
 	var pet *pcb
 	var err error
-	if pi.petCount < pi.ps.Spec.Replicas {
+	if pi.petCount < *(pi.ps.Spec.Replicas) {
 		pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps)
 		if err != nil {
 			pi.errs = append(pi.errs, err)
@@ -139,7 +139,7 @@ func (pi *statefulSetIterator) Value() *pcb {
 
 // NewStatefulSetIterator returns a new iterator. All pods in the given podList
 // are used to seed the queue of the iterator.
-func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulSetIterator {
+func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*v1.Pod) *statefulSetIterator {
 	pi := &statefulSetIterator{
 		ps: ps,
 		queue: NewPetQueue(ps, podList),
@@ -150,7 +150,7 @@ func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulS
 }
 
 // PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
-type PodsByCreationTimestamp []*api.Pod
+type PodsByCreationTimestamp []*v1.Pod
 
 func (o PodsByCreationTimestamp) Len() int { return len(o) }
 func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
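The iterator above now compares petCount against *(pi.ps.Spec.Replicas) and assumes the pointer is non-nil (API defaulting normally sets it). A defensive sketch of the same read with a nil guard; replicasOrDefault is a hypothetical helper and not part of this commit:

package main

import "fmt"

// replicasOrDefault is a hypothetical nil guard around a *int32 replica
// count; the controller code above dereferences the field directly.
func replicasOrDefault(replicas *int32, def int32) int32 {
	if replicas == nil {
		return def
	}
	return *replicas
}

func main() {
	var unset *int32
	three := int32(3)
	fmt.Println(replicasOrDefault(unset, 1))  // 1: fall back when the field is nil
	fmt.Println(replicasOrDefault(&three, 1)) // 3: dereference when it is set
}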
@@ -21,14 +21,14 @@ import (
 
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 func TestPetQueueCreates(t *testing.T) {
 	replicas := 3
 	ps := newStatefulSet(replicas)
-	q := NewPetQueue(ps, []*api.Pod{})
+	q := NewPetQueue(ps, []*v1.Pod{})
 	for i := 0; i < replicas; i++ {
 		pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
 		q.enqueue(pet)
@@ -107,7 +107,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
 	knownPods := newPodList(ps, 5)
 	for i := range knownPods {
 		knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i)
-		knownPods[i].Status.Phase = api.PodRunning
+		knownPods[i].Status.Phase = v1.PodRunning
 	}
 	pi := NewStatefulSetIterator(ps, knownPods)
 
@@ -128,7 +128,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
 	}
 
 	// Scale to 0 should delete all pods in system
-	ps.Spec.Replicas = 0
+	*(ps.Spec.Replicas) = 0
 	pi = NewStatefulSetIterator(ps, knownPods)
 	i = 0
 	for pi.Next() {
@@ -143,7 +143,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
 	}
 
 	// Relist with 0 replicas should no-op
-	pi = NewStatefulSetIterator(ps, []*api.Pod{})
+	pi = NewStatefulSetIterator(ps, []*v1.Pod{})
 	if pi.Next() != false {
 		t.Errorf("Unexpected iteration without any replicas or pods in system")
 	}
@@ -20,10 +20,10 @@ import (
 	"fmt"
 	"strconv"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/runtime"
 
@@ -52,9 +52,9 @@ const (
 // and parent fields to pass it around safely.
 type pcb struct {
 	// pod is the desired pet pod.
-	pod *api.Pod
+	pod *v1.Pod
 	// pvcs is a list of desired persistent volume claims for the pet pod.
-	pvcs []api.PersistentVolumeClaim
+	pvcs []v1.PersistentVolumeClaim
 	// event is the lifecycle event associated with this update.
 	event petLifeCycleEvent
 	// id is the identity index of this pet.
@@ -106,7 +106,7 @@ func (p *petSyncer) Sync(pet *pcb) error {
 		return err
 	}
 	// if pet failed - we need to remove old one because of consistent naming
-	if exists && realPet.pod.Status.Phase == api.PodFailed {
+	if exists && realPet.pod.Status.Phase == v1.PodFailed {
 		glog.V(2).Infof("Deleting evicted pod %v/%v", realPet.pod.Namespace, realPet.pod.Name)
 		if err := p.petClient.Delete(realPet); err != nil {
 			return err
@@ -175,7 +175,7 @@ type petClient interface {
 
 // apiServerPetClient is a statefulset aware Kubernetes client.
 type apiServerPetClient struct {
-	c internalclientset.Interface
+	c clientset.Interface
 	recorder record.EventRecorder
 	petHealthChecker
 }
@@ -242,12 +242,12 @@ func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
 	return nil
 }
 
-func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*api.PersistentVolumeClaim, error) {
+func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, error) {
 	pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
 	return pvc, err
 }
 
-func (p *apiServerPetClient) createPVC(pvc *api.PersistentVolumeClaim) error {
+func (p *apiServerPetClient) createPVC(pvc *v1.PersistentVolumeClaim) error {
 	_, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 	return err
 }
@@ -280,17 +280,17 @@ func (p *apiServerPetClient) SyncPVCs(pet *pcb) error {
 // event formats an event for the given runtime object.
 func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) {
 	if err != nil {
-		p.recorder.Eventf(obj, api.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
+		p.recorder.Eventf(obj, v1.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
 	} else {
-		p.recorder.Eventf(obj, api.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
+		p.recorder.Eventf(obj, v1.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
 	}
 }
 
 // petHealthChecker is an interface to check pet health. It makes a boolean
 // decision based on the given pod.
 type petHealthChecker interface {
-	isHealthy(*api.Pod) bool
-	isDying(*api.Pod) bool
+	isHealthy(*v1.Pod) bool
+	isDying(*v1.Pod) bool
 }
 
 // defaultPetHealthChecks does basic health checking.
@@ -299,11 +299,11 @@ type defaultPetHealthChecker struct{}
 
 // isHealthy returns true if the pod is ready & running. If the pod has the
 // "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored.
-func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
-	if pod == nil || pod.Status.Phase != api.PodRunning {
+func (d *defaultPetHealthChecker) isHealthy(pod *v1.Pod) bool {
+	if pod == nil || pod.Status.Phase != v1.PodRunning {
 		return false
 	}
-	podReady := api.IsPodReady(pod)
+	podReady := v1.IsPodReady(pod)
 
 	// User may have specified a pod readiness override through a debug annotation.
 	initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
@@ -321,6 +321,6 @@ func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
 // isDying returns true if the pod has a non-nil deletion timestamp. Since the
 // timestamp can only decrease, once this method returns true for a given pet, it
 // will never return false.
-func (d *defaultPetHealthChecker) isDying(pod *api.Pod) bool {
+func (d *defaultPetHealthChecker) isDying(pod *v1.Pod) bool {
 	return pod != nil && pod.DeletionTimestamp != nil
 }
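The health checker above now works entirely in terms of the versioned types: v1.PodRunning for the phase and v1.IsPodReady for the readiness condition. A simplified sketch of that decision (omitting the StatefulSetInitAnnotation override handled by defaultPetHealthChecker), assuming the 1.5-era v1 package from the hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// healthySketch mirrors the running-and-ready part of the check above; it is
// an illustration, not the controller's implementation.
func healthySketch(pod *v1.Pod) bool {
	if pod == nil || pod.Status.Phase != v1.PodRunning {
		return false
	}
	return v1.IsPodReady(pod)
}

func main() {
	p := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
	p.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
	fmt.Println(healthySketch(p)) // true: running with a Ready=True condition
}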
@@ -22,12 +22,12 @@ import (
 	"sort"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 
 	"k8s.io/kubernetes/pkg/controller"
@@ -52,7 +52,7 @@ const (
 
 // StatefulSetController controls statefulsets.
 type StatefulSetController struct {
-	kubeClient internalclientset.Interface
+	kubeClient clientset.Interface
 
 	// newSyncer returns an interface capable of syncing a single pet.
 	// Abstracted out for testing.
@@ -83,11 +83,11 @@ type StatefulSetController struct {
 }
 
 // NewStatefulSetController creates a new statefulset controller.
-func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
+func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
-	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "statefulset"})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+	recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "statefulset"})
 	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}
 
 	psc := &StatefulSetController{
@@ -112,11 +112,11 @@ func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient
 
 	psc.psStore.Store, psc.psController = cache.NewInformer(
 		&cache.ListWatch{
-			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).List(options)
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).Watch(options)
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options)
 			},
 		},
 		&apps.StatefulSet{},
@@ -156,7 +156,7 @@ func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
 
 // addPod adds the statefulset for the pod to the sync queue
 func (psc *StatefulSetController) addPod(obj interface{}) {
-	pod := obj.(*api.Pod)
+	pod := obj.(*v1.Pod)
 	glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
 	ps := psc.getStatefulSetForPod(pod)
 	if ps == nil {
@@ -168,8 +168,8 @@ func (psc *StatefulSetController) addPod(obj interface{}) {
 // updatePod adds the statefulset for the current and old pods to the sync queue.
 // If the labels of the pod didn't change, this method enqueues a single statefulset.
 func (psc *StatefulSetController) updatePod(old, cur interface{}) {
-	curPod := cur.(*api.Pod)
-	oldPod := old.(*api.Pod)
+	curPod := cur.(*v1.Pod)
+	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
 		// Periodic resync will send update events for all known pods.
 		// Two different versions of the same pod will always have different RVs.
@@ -189,7 +189,7 @@ func (psc *StatefulSetController) updatePod(old, cur interface{}) {
 
 // deletePod enqueues the statefulset for the pod accounting for deletion tombstones.
 func (psc *StatefulSetController) deletePod(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
+	pod, ok := obj.(*v1.Pod)
 
 	// When a delete is dropped, the relist will notice a pod in the store not
 	// in the list, leading to the insertion of a tombstone object which contains
@@ -201,7 +201,7 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
 		glog.Errorf("couldn't get object from tombstone %+v", obj)
 		return
 	}
-	pod, ok = tombstone.Obj.(*api.Pod)
+	pod, ok = tombstone.Obj.(*v1.Pod)
 	if !ok {
 		glog.Errorf("tombstone contained object that is not a pod %+v", obj)
 		return
@@ -214,18 +214,18 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
 }
 
 // getPodsForStatefulSets returns the pods that match the selectors of the given statefulset.
-func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*api.Pod, error) {
+func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*v1.Pod, error) {
 	// TODO: Do we want the statefulset to fight with RCs? check parent statefulset annoation, or name prefix?
 	sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
 	if err != nil {
-		return []*api.Pod{}, err
+		return []*v1.Pod{}, err
 	}
 	pods, err := psc.podStore.Pods(ps.Namespace).List(sel)
 	if err != nil {
-		return []*api.Pod{}, err
+		return []*v1.Pod{}, err
 	}
 	// TODO: Do we need to copy?
-	result := make([]*api.Pod, 0, len(pods))
+	result := make([]*v1.Pod, 0, len(pods))
 	for i := range pods {
 		result = append(result, &(*pods[i]))
 	}
@@ -233,7 +233,7 @@ func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([
 }
 
 // getStatefulSetForPod returns the pet set managing the given pod.
-func (psc *StatefulSetController) getStatefulSetForPod(pod *api.Pod) *apps.StatefulSet {
+func (psc *StatefulSetController) getStatefulSetForPod(pod *v1.Pod) *apps.StatefulSet {
 	ps, err := psc.psStore.GetPodStatefulSets(pod)
 	if err != nil {
 		glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name)
@@ -320,7 +320,7 @@ func (psc *StatefulSetController) Sync(key string) error {
 }
 
 // syncStatefulSet syncs a tuple of (statefulset, pets).
-func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*api.Pod) (int, error) {
+func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*v1.Pod) (int, error) {
 	glog.V(2).Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets))
 
 	it := NewStatefulSetIterator(ps, pets)
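The controller constructor above swaps the internal clientset for the generated release_1_5 clientset and records events through the versioned core client. A minimal sketch of that wiring, assuming the 1.5-era packages named in the hunks; the host address is a placeholder and error handling is omitted:

package main

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newRecorder wires an event broadcaster to the versioned core client,
// mirroring the NewStatefulSetController lines above.
func newRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return eventBroadcaster.NewRecorder(v1.EventSource{Component: "statefulset"})
}

func main() {
	// Placeholder apiserver address; a real caller would supply its own config.
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "http://127.0.0.1:8080"})
	_ = newRecorder(c)
}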
@@ -22,12 +22,12 @@ import (
 	"reflect"
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake"
+	fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/errors"
 )
@@ -50,7 +50,7 @@ func checkPets(ps *apps.StatefulSet, creates, deletes int, fc *fakePetClient, t
 	if fc.petsCreated != creates || fc.petsDeleted != deletes {
 		t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes)
 	}
-	gotClaims := map[string]api.PersistentVolumeClaim{}
+	gotClaims := map[string]v1.PersistentVolumeClaim{}
 	for _, pvc := range fc.claims {
 		gotClaims[pvc.Name] = pvc
 	}
@@ -88,7 +88,7 @@ func scaleStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetContro
 }
 
 func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) {
-	err := scaleStatefulSet(t, ps, psc, fc, int(ps.Spec.Replicas))
+	err := scaleStatefulSet(t, ps, psc, fc, int(*(ps.Spec.Replicas)))
 	if err != nil {
 		t.Errorf("Error scaleStatefulSet: %v", err)
 	}
@@ -119,7 +119,7 @@ func TestStatefulSetControllerDeletes(t *testing.T) {
 
 	// Drain
 	errs := []error{}
-	ps.Spec.Replicas = 0
+	*(ps.Spec.Replicas) = 0
 	knownPods := fc.getPodList()
 	for i := replicas - 1; i >= 0; i-- {
 		if len(fc.pets) != i+1 {
@@ -143,7 +143,7 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
 	saturateStatefulSet(t, ps, psc, fc)
 
 	fc.setDeletionTimestamp(replicas - 1)
-	ps.Spec.Replicas = 2
+	*(ps.Spec.Replicas) = 2
 	_, err := psc.syncStatefulSet(ps, fc.getPodList())
 	if err != nil {
 		t.Errorf("Error syncing StatefulSet: %v", err)
@@ -169,7 +169,7 @@ func TestStatefulSetControllerRespectsOrder(t *testing.T) {
 	saturateStatefulSet(t, ps, psc, fc)
 
 	errs := []error{}
-	ps.Spec.Replicas = 0
+	*(ps.Spec.Replicas) = 0
 	// Shuffle known list and check that pets are deleted in reverse
 	knownPods := fc.getPodList()
 	for i := range knownPods {
@@ -285,16 +285,16 @@ type fakeClient struct {
 	statefulsetClient *fakeStatefulSetClient
 }
 
-func (c *fakeClient) Apps() internalversion.AppsInterface {
-	return &fakeApps{c, &fake.FakeApps{}}
+func (c *fakeClient) Apps() v1beta1.AppsV1beta1Interface {
+	return &fakeApps{c, &fake.FakeAppsV1beta1{}}
 }
 
 type fakeApps struct {
 	*fakeClient
-	*fake.FakeApps
+	*fake.FakeAppsV1beta1
 }
 
-func (c *fakeApps) StatefulSets(namespace string) internalversion.StatefulSetInterface {
+func (c *fakeApps) StatefulSets(namespace string) v1beta1.StatefulSetInterface {
 	c.statefulsetClient.Namespace = namespace
 	return c.statefulsetClient
 }
@@ -20,10 +20,10 @@ import (
 	"fmt"
 	"sync"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/api/v1"
+	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
+	appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
 
 	"github.com/golang/glog"
@@ -51,7 +51,7 @@ func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulS
 	var getErr error
 	for i, ps := 0, &ps; ; i++ {
 		glog.V(4).Infof(fmt.Sprintf("Updating replica count for StatefulSet: %s/%s, ", ps.Namespace, ps.Name) +
-			fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas))
+			fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, *(ps.Spec.Replicas)))
 
 		ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)}
 		_, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps)
@@ -72,7 +72,7 @@ type unhealthyPetTracker struct {
 }
 
 // Get returns a previously recorded blocking pet for the given statefulset.
-func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*api.Pod) (*pcb, error) {
+func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*v1.Pod) (*pcb, error) {
 	u.storeLock.Lock()
 	defer u.storeLock.Unlock()
@@ -21,11 +21,11 @@ import (
 	"net/http/httptest"
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -39,10 +39,10 @@ func newPetClient(client *clientset.Clientset) *apiServerPetClient {
 }
 
 func makeTwoDifferntPCB() (pcb1, pcb2 *pcb) {
-	userAdded := api.Volume{
+	userAdded := v1.Volume{
 		Name: "test",
-		VolumeSource: api.VolumeSource{
-			EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
+		VolumeSource: v1.VolumeSource{
+			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
 		},
 	}
 	ps := newStatefulSet(2)
@@ -88,14 +88,14 @@ func TestUpdatePetWithoutRetry(t *testing.T) {
 	}
 
 	for k, tc := range testCases {
-		body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}})
+		body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
 		fakeHandler := utiltesting.FakeHandler{
 			StatusCode: 200,
 			ResponseBody: string(body),
 		}
 		testServer := httptest.NewServer(&fakeHandler)
 
-		client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+		client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 		petClient := newPetClient(client)
 		err := petClient.Update(tc.realPet, tc.expectedPet)
 
@@ -115,7 +115,7 @@ func TestUpdatePetWithFailure(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 	petClient := newPetClient(client)
 
 	pcb1, pcb2 := makeTwoDifferntPCB()