Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00
Merge pull request #20202 from caesarxuchao/skip-update
skip update when deleting with grace-period=0
Commit ae2aece9af
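
In short, this change makes a delete issued with grace-period=0 remove the object immediately instead of first writing DeletionTimestamp/DeletionGracePeriodSeconds back to storage. A minimal sketch of the two client-side cases, reusing podClient and api.NewDeleteOptions as they appear in the e2e hunk at the bottom of this diff (the back-to-back calls are illustrative only, not part of the change):

    // Graceful delete: the registry updates the object, recording a 30s grace
    // period, and the pod lingers until that period expires.
    if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil {
        Failf("Failed to delete pod: %v", err)
    }
    // Immediate delete: with this PR, a zero grace period skips that update and
    // the object is removed from storage right away.
    if err := podClient.Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
        Failf("Failed to delete pod: %v", err)
    }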
@@ -673,7 +673,8 @@ func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc,
         t.Errorf("unexpected error, object should be deleted immediately: %v", err)
     }
     objectMeta = t.getObjectMetaOrFail(out)
-    if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != 0 {
+    // The second delete shouldn't update the object, so objectMeta.DeletionGracePeriodSeconds should equal the value set in the first delete.
+    if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
         t.Errorf("unexpected deleted meta: %#v", objectMeta)
     }
 }
@@ -141,6 +141,14 @@ type Pod struct {
     Containers []*Container
 }
 
+// PodPair contains both runtime#Pod and api#Pod
+type PodPair struct {
+    // APIPod is the api.Pod
+    APIPod *api.Pod
+    // RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod
+    RunningPod *Pod
+}
+
 // ContainerID is a type that identifies a container.
 type ContainerID struct {
     // The type of the container runtime. e.g. 'docker', 'rkt'.
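
A brief sketch of how the new PodPair is produced and consumed, using names from the kubelet hunks below (assumed context, not part of the diff): deletePod pairs the api.Pod from the apiserver with the matching runtime pod and queues it on podKillingCh; podKiller unpacks both halves so killPod can be handed the API object as well as the running containers.

    // Producer side (sketch; names follow deletePod below).
    kl.podKillingCh <- &kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}

    // Consumer side (sketch; as in podKiller below): unpack both halves so
    // killPod receives the api.Pod instead of nil.
    podPair := <-kl.podKillingCh
    err := kl.killPod(podPair.APIPod, podPair.RunningPod, nil)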
@@ -450,7 +450,7 @@ func NewMainKubelet(
     klet.podWorkers = newPodWorkers(klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)
 
     klet.backOff = util.NewBackOff(backOffPeriod, MaxContainerBackOff)
-    klet.podKillingCh = make(chan *kubecontainer.Pod, podKillingChannelCapacity)
+    klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
     klet.sourcesSeen = sets.NewString()
     return klet, nil
 }
@@ -632,7 +632,7 @@ type Kubelet struct {
     backOff *util.Backoff
 
     // Channel for sending pods to kill.
-    podKillingCh chan *kubecontainer.Pod
+    podKillingCh chan *kubecontainer.PodPair
 
     // The configuration file used as the base to generate the container's
     // DNS resolver configuration file. This can be used in conjunction with
@@ -1961,13 +1961,16 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.
     kl.statusManager.RemoveOrphanedStatuses(podUIDs)
 }
 
-func (kl *Kubelet) deletePod(uid types.UID) error {
+func (kl *Kubelet) deletePod(pod *api.Pod) error {
+    if pod == nil {
+        return fmt.Errorf("deletePod does not allow nil pod")
+    }
     if !kl.allSourcesReady() {
         // If the sources aren't ready, skip deletion, as we may accidentally delete pods
         // for sources that haven't reported yet.
         return fmt.Errorf("skipping delete because sources aren't ready yet")
     }
-    kl.podWorkers.ForgetWorker(uid)
+    kl.podWorkers.ForgetWorker(pod.UID)
 
     // Runtime cache may not have been updated with the pod, but it's okay
     // because the periodic cleanup routine will attempt to delete again later.
@@ -1975,12 +1978,13 @@ func (kl *Kubelet) deletePod(uid types.UID) error {
     if err != nil {
         return fmt.Errorf("error listing containers: %v", err)
     }
-    pod := kubecontainer.Pods(runningPods).FindPod("", uid)
-    if pod.IsEmpty() {
+    runningPod := kubecontainer.Pods(runningPods).FindPod("", pod.UID)
+    if runningPod.IsEmpty() {
         return fmt.Errorf("pod not found")
     }
+    podPair := kubecontainer.PodPair{pod, &runningPod}
 
-    kl.podKillingCh <- &pod
+    kl.podKillingCh <- &podPair
     // TODO: delete the mirror pod here?
 
     // We leave the volume/directory cleanup to the periodic cleanup routine.
@@ -2023,7 +2027,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
     }
     for _, pod := range runningPods {
         if _, found := desiredPods[pod.ID]; !found {
-            kl.podKillingCh <- pod
+            kl.podKillingCh <- &kubecontainer.PodPair{nil, pod}
         }
     }
 
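
For these orphaned runtime pods there is no corresponding api.Pod, so the APIPod half of the pair is nil and podKiller (next hunk) ends up calling killPod with a nil API pod, just as it did before this change. A keyed form of the literal above, purely as a readability note (not part of the diff):

    kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}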
@@ -2082,25 +2086,27 @@ func (kl *Kubelet) podKiller() {
     defer close(resultCh)
     for {
         select {
-        case pod, ok := <-kl.podKillingCh:
+        case podPair, ok := <-kl.podKillingCh:
+            runningPod := podPair.RunningPod
+            apiPod := podPair.APIPod
             if !ok {
                 return
             }
-            if killing.Has(string(pod.ID)) {
+            if killing.Has(string(runningPod.ID)) {
                 // The pod is already being killed.
                 break
             }
-            killing.Insert(string(pod.ID))
-            go func(pod *kubecontainer.Pod, ch chan types.UID) {
+            killing.Insert(string(runningPod.ID))
+            go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
                 defer func() {
-                    ch <- pod.ID
+                    ch <- runningPod.ID
                 }()
-                glog.V(2).Infof("Killing unwanted pod %q", pod.Name)
-                err := kl.killPod(nil, pod, nil)
+                glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
+                err := kl.killPod(apiPod, runningPod, nil)
                 if err != nil {
-                    glog.Errorf("Failed killing the pod %q: %v", pod.Name, err)
+                    glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
                 }
-            }(pod, resultCh)
+            }(apiPod, runningPod, resultCh)
 
         case podID := <-resultCh:
             killing.Delete(string(podID))
@@ -2388,7 +2394,7 @@ func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
         }
         // Deletion is allowed to fail because the periodic cleanup routine
         // will trigger deletion again.
-        if err := kl.deletePod(pod.UID); err != nil {
+        if err := kl.deletePod(pod); err != nil {
             glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
         }
         kl.probeManager.RemovePod(pod)
@@ -176,7 +176,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
     fakeClock := util.NewFakeClock(time.Now())
     kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
     kubelet.backOff.Clock = fakeClock
-    kubelet.podKillingCh = make(chan *kubecontainer.Pod, 20)
+    kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
     kubelet.resyncInterval = 10 * time.Second
     kubelet.reservation = kubetypes.Reservation{
         Kubernetes: api.ResourceList{
@@ -387,7 +387,7 @@ func (e *Etcd) Delete(ctx api.Context, name string, options *api.DeleteOptions)
     if pendingGraceful {
         return e.finalizeDelete(obj, false)
     }
-    if graceful {
+    if graceful && *options.GracePeriodSeconds > 0 {
         out := e.NewFunc()
         lastGraceful := int64(0)
         err := e.Storage.GuaranteedUpdate(
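
This is the core of the PR: the graceful branch, which writes the updated DeletionGracePeriodSeconds back to storage via GuaranteedUpdate, is now taken only when the requested grace period is positive, so a delete with grace-period=0 falls through to immediate removal without an extra update. That is what the testDeleteGracefulImmediate change at the top asserts. A rough sequence, assuming a test context with the names used in the hunks above (e, ctx, name, expectedGrace, t):

    // First delete: graceful; the registry updates the object, recording
    // DeletionTimestamp and DeletionGracePeriodSeconds=expectedGrace.
    if _, err := e.Delete(ctx, name, api.NewDeleteOptions(expectedGrace)); err != nil {
        t.Fatalf("graceful delete failed: %v", err)
    }
    // Second delete: grace period 0; the update is skipped and the object is
    // removed from storage immediately, so the returned metadata still shows
    // DeletionGracePeriodSeconds == expectedGrace rather than 0.
    out, err := e.Delete(ctx, name, api.NewDeleteOptions(0))
    if err != nil {
        t.Fatalf("immediate delete failed: %v", err)
    }
    _ = out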
@ -343,8 +343,12 @@ var _ = Describe("Pods", func() {
|
|||||||
Fail("Timeout while waiting for pod creation")
|
Fail("Timeout while waiting for pod creation")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We need to wait for the pod to be scheduled, otherwise the deletion
|
||||||
|
// will be carried out immediately rather than gracefully.
|
||||||
|
expectNoError(framework.WaitForPodRunning(pod.Name))
|
||||||
|
|
||||||
By("deleting the pod gracefully")
|
By("deleting the pod gracefully")
|
||||||
if err := podClient.Delete(pod.Name, nil); err != nil {
|
if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil {
|
||||||
Failf("Failed to delete pod: %v", err)
|
Failf("Failed to delete pod: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -352,7 +356,7 @@ var _ = Describe("Pods", func() {
|
|||||||
deleted := false
|
deleted := false
|
||||||
timeout := false
|
timeout := false
|
||||||
var lastPod *api.Pod
|
var lastPod *api.Pod
|
||||||
timer := time.After(podStartTimeout)
|
timer := time.After(30 * time.Second)
|
||||||
for !deleted && !timeout {
|
for !deleted && !timeout {
|
||||||
select {
|
select {
|
||||||
case event, _ := <-w.ResultChan():
|
case event, _ := <-w.ResultChan():
|
||||||