make kubelet.HandlePodDeletions aware of api.Pod

commit a6d96a04d0
parent fdf6a0f61c
Author: Chao Xu
Date:   2016-01-31 15:56:55 -08:00

3 changed files with 33 additions and 19 deletions

pkg/kubelet/container/runtime.go

@@ -141,6 +141,14 @@ type Pod struct {
 	Containers []*Container
 }
 
+// PodPair contains both runtime#Pod and api#Pod
+type PodPair struct {
+	// APIPod is the api.Pod
+	APIPod *api.Pod
+	// RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod
+	RunningPod *Pod
+}
+
 // ContainerID is a type that identifies a container.
 type ContainerID struct {
 	// The type of the container runtime. e.g. 'docker', 'rkt'.
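
The new type pairs the kubelet's two views of a pod: APIPod is the user-declared spec from the apiserver, RunningPod is what the container runtime reports. A minimal sketch of a consumer (the helper is hypothetical, not part of the commit; it assumes this package and the api import already present in the tree) shows the one rule every consumer must follow: APIPod may be nil for orphaned pods, while RunningPod is always set.

// podPairName is a hypothetical helper illustrating PodPair's contract:
// guard APIPod against nil, which marks a pod with no matching config.
func podPairName(pp *PodPair) string {
	if pp.APIPod == nil {
		return "orphaned:" + pp.RunningPod.Name
	}
	return pp.APIPod.Namespace + "/" + pp.APIPod.Name
}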

pkg/kubelet/kubelet.go

@@ -450,7 +450,7 @@ func NewMainKubelet(
 	klet.podWorkers = newPodWorkers(klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)
 	klet.backOff = util.NewBackOff(backOffPeriod, MaxContainerBackOff)
-	klet.podKillingCh = make(chan *kubecontainer.Pod, podKillingChannelCapacity)
+	klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
 	klet.sourcesSeen = sets.NewString()
 	return klet, nil
 }
@@ -632,7 +632,7 @@ type Kubelet struct {
 	backOff *util.Backoff
 
 	// Channel for sending pods to kill.
-	podKillingCh chan *kubecontainer.Pod
+	podKillingCh chan *kubecontainer.PodPair
 
 	// The configuration file used as the base to generate the container's
 	// DNS resolver configuration file. This can be used in conjunction with
@@ -1961,13 +1961,16 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) {
 	kl.statusManager.RemoveOrphanedStatuses(podUIDs)
 }
 
-func (kl *Kubelet) deletePod(uid types.UID) error {
+func (kl *Kubelet) deletePod(pod *api.Pod) error {
+	if pod == nil {
+		return fmt.Errorf("deletePod does not allow nil pod")
+	}
 	if !kl.allSourcesReady() {
 		// If the sources aren't ready, skip deletion, as we may accidentally delete pods
 		// for sources that haven't reported yet.
 		return fmt.Errorf("skipping delete because sources aren't ready yet")
 	}
-	kl.podWorkers.ForgetWorker(uid)
+	kl.podWorkers.ForgetWorker(pod.UID)
 
 	// Runtime cache may not have been updated with the pod, but it's okay
 	// because the periodic cleanup routine will attempt to delete again later.
@@ -1975,12 +1978,13 @@ func (kl *Kubelet) deletePod(uid types.UID) error {
 	if err != nil {
 		return fmt.Errorf("error listing containers: %v", err)
 	}
-	pod := kubecontainer.Pods(runningPods).FindPod("", uid)
-	if pod.IsEmpty() {
+	runningPod := kubecontainer.Pods(runningPods).FindPod("", pod.UID)
+	if runningPod.IsEmpty() {
 		return fmt.Errorf("pod not found")
 	}
+	podPair := kubecontainer.PodPair{pod, &runningPod}
 
-	kl.podKillingCh <- &pod
+	kl.podKillingCh <- &podPair
 	// TODO: delete the mirror pod here?
 
 	// We leave the volume/directory cleanup to the periodic cleanup routine.
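
As a style aside (not part of the commit): the unkeyed literal kubecontainer.PodPair{pod, &runningPod} compiles, but breaks silently if PodPair ever gains or reorders fields; a keyed equivalent is sturdier:

	podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}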
@@ -2023,7 +2027,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	}
 	for _, pod := range runningPods {
 		if _, found := desiredPods[pod.ID]; !found {
-			kl.podKillingCh <- pod
+			kl.podKillingCh <- &kubecontainer.PodPair{nil, pod}
 		}
 	}
@@ -2082,25 +2086,27 @@ func (kl *Kubelet) podKiller() {
 	defer close(resultCh)
 	for {
 		select {
-		case pod, ok := <-kl.podKillingCh:
+		case podPair, ok := <-kl.podKillingCh:
 			if !ok {
 				return
 			}
-			if killing.Has(string(pod.ID)) {
+			runningPod := podPair.RunningPod
+			apiPod := podPair.APIPod
+			if killing.Has(string(runningPod.ID)) {
 				// The pod is already being killed.
 				break
 			}
-			killing.Insert(string(pod.ID))
-			go func(pod *kubecontainer.Pod, ch chan types.UID) {
+			killing.Insert(string(runningPod.ID))
+			go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
 				defer func() {
-					ch <- pod.ID
+					ch <- runningPod.ID
 				}()
-				glog.V(2).Infof("Killing unwanted pod %q", pod.Name)
-				err := kl.killPod(nil, pod, nil)
+				glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
+				err := kl.killPod(apiPod, runningPod, nil)
 				if err != nil {
-					glog.Errorf("Failed killing the pod %q: %v", pod.Name, err)
+					glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
 				}
-			}(pod, resultCh)
+			}(apiPod, runningPod, resultCh)
 		case podID := <-resultCh:
 			killing.Delete(string(podID))
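
The bookkeeping here is easy to lose in the rename noise. A standalone sketch of the same pattern with stub types (hypothetical, not from the commit): one loop owns the killing set, every kill runs in its own goroutine, and the result channel is the only path that clears an in-flight mark.

package main

import (
	"fmt"
	"time"
)

// killer deduplicates kill requests: a UID already in flight is ignored
// until its goroutine reports completion on resultCh.
func killer(requests <-chan string) {
	killing := map[string]bool{}
	resultCh := make(chan string)
	for {
		select {
		case uid, ok := <-requests:
			if !ok {
				return
			}
			if killing[uid] {
				break // a kill for this pod is already running
			}
			killing[uid] = true
			go func(uid string) {
				fmt.Println("killing", uid) // stand-in for kl.killPod
				resultCh <- uid
			}(uid)
		case uid := <-resultCh:
			delete(killing, uid)
		}
	}
}

func main() {
	reqs := make(chan string, 3)
	reqs <- "pod-a"
	reqs <- "pod-a" // duplicate; ignored if the first kill is still in flight
	reqs <- "pod-b"
	go killer(reqs)
	time.Sleep(50 * time.Millisecond)
	close(reqs)
}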
@@ -2388,7 +2394,7 @@ func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
 		}
 		// Deletion is allowed to fail because the periodic cleanup routine
 		// will trigger deletion again.
-		if err := kl.deletePod(pod.UID); err != nil {
+		if err := kl.deletePod(pod); err != nil {
 			glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
 		}
 		kl.probeManager.RemovePod(pod)

pkg/kubelet/kubelet_test.go

@@ -176,7 +176,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 	fakeClock := &util.FakeClock{Time: time.Now()}
 	kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
 	kubelet.backOff.Clock = fakeClock
-	kubelet.podKillingCh = make(chan *kubecontainer.Pod, 20)
+	kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
 	kubelet.resyncInterval = 10 * time.Second
 	kubelet.reservation = kubetypes.Reservation{
 		Kubernetes: api.ResourceList{
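
A sketch of a test exercising the retyped channel (hypothetical test, not in the commit, assuming this file's existing testing and kubecontainer imports); the buffer is what lets tests enqueue kills without a podKiller goroutine draining them:

func TestPodKillingChCarriesPodPair(t *testing.T) {
	ch := make(chan *kubecontainer.PodPair, 1)
	// Orphaned pods travel with a nil APIPod; the buffered send must not block.
	ch <- &kubecontainer.PodPair{APIPod: nil, RunningPod: &kubecontainer.Pod{Name: "orphan"}}
	pp := <-ch
	if pp.APIPod != nil || pp.RunningPod.Name != "orphan" {
		t.Fatalf("unexpected pod pair: %+v", pp)
	}
}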