diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go
index 30aa113d38e..cdefc1da2c6 100644
--- a/pkg/kubelet/prober/worker.go
+++ b/pkg/kubelet/prober/worker.go
@@ -240,7 +240,7 @@ func (w *worker) doProbe() (keepGoing bool) {
 		// chance of hitting #21751, where running `docker exec` when a
 		// container is being stopped may lead to corrupted container state.
 		w.onHold = true
-		w.resultRun = 1
+		w.resultRun = 0
 	}
 
 	return true
diff --git a/pkg/kubelet/prober/worker_test.go b/pkg/kubelet/prober/worker_test.go
index a36c0afde60..d7473a35bc6 100644
--- a/pkg/kubelet/prober/worker_test.go
+++ b/pkg/kubelet/prober/worker_test.go
@@ -352,7 +352,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 	expectContinue(t, w, w.doProbe(), msg)
 	expectResult(t, w, results.Success, msg)
 	if w.resultRun != 1 {
-		t.Errorf("Prober resultRun should 1")
+		t.Errorf("Prober resultRun should be 1")
 	}
 
 	m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -360,7 +360,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 	expectContinue(t, w, w.doProbe(), msg)
 	expectResult(t, w, results.Success, msg)
 	if w.resultRun != 1 {
-		t.Errorf("Prober resultRun should 1")
+		t.Errorf("Prober resultRun should be 1")
 	}
 
 	m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -372,13 +372,13 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
 	}
 
 	// Exceeding FailureThreshold should cause resultRun to
-	// reset to 1 so that the probe on the restarted pod
+	// reset to 0 so that the probe on the restarted pod
 	// also gets FailureThreshold attempts to succeed.
 	m.prober.exec = fakeExecProber{probe.Failure, nil}
 	msg = "3rd probe failure, result failure"
 	expectContinue(t, w, w.doProbe(), msg)
 	expectResult(t, w, results.Failure, msg)
-	if w.resultRun != 1 {
-		t.Errorf("Prober resultRun should be reset to 1")
+	if w.resultRun != 0 {
+		t.Errorf("Prober resultRun should be reset to 0")
 	}
 }
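
For context, a minimal standalone sketch (not the kubelet worker itself; the simulate helper and its parameters are illustrative only) of why the consecutive-result counter must reset to 0 rather than 1: with failureThreshold = 3, a counter that restarts at 1 trips after only two more failed probes, while a counter that restarts at 0 gives the restarted container the full three attempts, which is the behavior the updated test asserts.

package main

import "fmt"

// simulate returns how many consecutive failed probes it takes to reach
// failureThreshold when the consecutive-result counter starts at initial.
func simulate(initial, failureThreshold int) int {
	resultRun := initial
	failures := 0
	for {
		failures++  // one more failed probe observed
		resultRun++ // counter incremented per consecutive failure
		if resultRun >= failureThreshold {
			return failures // threshold reached: container would be restarted
		}
	}
}

func main() {
	fmt.Println("reset to 1:", simulate(1, 3), "failures until restart") // only 2 attempts
	fmt.Println("reset to 0:", simulate(0, 3), "failures until restart") // full 3 attempts
}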