remove ReallyCrashForTesting and clean up some references to HandleCrash behavior

Sergey Kanzhelev 2021-05-04 00:10:11 +00:00
parent 9a75e7b0fd
commit a11453efbc
10 changed files with 5 additions and 58 deletions

View File

@@ -54,9 +54,6 @@ type KubeletFlags struct {
 KubeConfig string
 BootstrapKubeconfig string
-// Crash immediately, rather than eating panics.
-ReallyCrashForTesting bool
 // HostnameOverride is the hostname used to identify the kubelet instead
 // of the actual hostname.
 HostnameOverride string
@@ -352,8 +349,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
 fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version")
 fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.")
 fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version")
-fs.BoolVar(&f.ReallyCrashForTesting, "really-crash-for-testing", f.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
-fs.MarkDeprecated("really-crash-for-testing", "will be removed in a future version.")
 fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.")
 fs.MarkDeprecated("experimental-mounter-path", "will be removed in 1.23. in favor of using CSI.")
 fs.BoolVar(&f.ExperimentalCheckNodeCapabilitiesBeforeMount, "experimental-check-node-capabilities-before-mount", f.ExperimentalCheckNodeCapabilitiesBeforeMount, "[Experimental] if set true, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount")

View File

@@ -45,7 +45,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/types"
 utilnet "k8s.io/apimachinery/pkg/util/net"
-utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 genericapiserver "k8s.io/apiserver/pkg/server"
@@ -791,8 +790,6 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 }
 }
-utilruntime.ReallyCrash = s.ReallyCrashForTesting
 // TODO(vmarmol): Do this through container config.
 oomAdjuster := kubeDeps.OOMAdjuster
 if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {

View File

@@ -39,7 +39,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/types"
-utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/kubernetes/fake"
@@ -87,7 +86,6 @@ import (
 )
 func init() {
-utilruntime.ReallyCrash = true
 }
 const (

View File

@@ -25,7 +25,6 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -34,7 +33,6 @@ import (
 )
 func init() {
-runtime.ReallyCrash = true
 }
 var defaultProbe = &v1.Probe{

View File

@@ -23,20 +23,16 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/kubernetes/fake"
 "k8s.io/client-go/tools/record"
 kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 "k8s.io/kubernetes/pkg/kubelet/prober/results"
 "k8s.io/kubernetes/pkg/kubelet/status"
 statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 "k8s.io/kubernetes/pkg/probe"
 "k8s.io/utils/exec"
 )
-func init() {
-runtime.ReallyCrash = true
-}
 func TestDoProbe(t *testing.T) {
@@ -296,27 +292,6 @@ func TestCleanUp(t *testing.T) {
 }
 }
-func TestHandleCrash(t *testing.T) {
-runtime.ReallyCrash = false // Test that we *don't* really crash.
-m := newTestManager()
-w := newTestWorker(m, readiness, v1.Probe{})
-m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
-expectContinue(t, w, w.doProbe(), "Initial successful probe.")
-expectResult(t, w, results.Success, "Initial successful probe.")
-// Prober starts crashing.
-m.prober = &prober{
-recorder: &record.FakeRecorder{},
-exec: crashingExecProber{},
-}
-// doProbe should recover from the crash, and keep going.
-expectContinue(t, w, w.doProbe(), "Crashing probe.")
-expectResult(t, w, results.Success, "Crashing probe unchanged.")
-}
 func expectResult(t *testing.T, w *worker, expectedResult results.Result, msg string) {
 result, ok := resultsManager(w.probeManager, w.probeType).Get(w.containerID)
 if !ok {
@@ -345,12 +320,6 @@ func resultsManager(m *manager, probeType probeType) results.Manager {
 panic(fmt.Errorf("Unhandled case: %v", probeType))
 }
-type crashingExecProber struct{}
-func (p crashingExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
-panic("Intentional Probe crash.")
-}
 func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
 m := newTestManager()
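
For context on the deleted TestHandleCrash: it forced ReallyCrash to false so that a panicking prober was swallowed by the worker's deferred crash handler and probing kept going; with the kubelet flag removed, nothing in the kubelet flips ReallyCrash off any more, so that toggle path goes away. Below is a minimal sketch of the recovery pattern the test exercised, assuming the apimachinery HandleCrash semantics (run the handlers, then re-panic only when ReallyCrash is true); the doProbe helper is illustrative, not the kubelet's implementation.

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// doProbe sketches the pattern the removed test relied on: recover from a
// panicking prober via a deferred HandleCrash and report whether to keep going.
func doProbe(probe func()) (keepGoing bool) {
	defer utilruntime.HandleCrash(func(_ interface{}) { keepGoing = true })
	probe()
	return true
}

func main() {
	// The deleted test set this to false so HandleCrash would only log the
	// panic instead of re-raising it; the package default remains true.
	utilruntime.ReallyCrash = false
	fmt.Println("keep going:", doProbe(func() { panic("intentional probe crash") }))
}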

View File

@@ -33,7 +33,6 @@ import (
 discovery "k8s.io/api/discovery/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/pkg/proxy"
 ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
@@ -186,9 +185,6 @@ var tcpServerPort int32
 var udpServerPort int32
 func TestMain(m *testing.M) {
-// Don't handle panics
-runtime.ReallyCrash = true
 // TCP setup.
 tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 w.WriteHeader(http.StatusOK)

View File

@@ -33,7 +33,6 @@ import (
 discovery "k8s.io/api/discovery/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/kubernetes/pkg/proxy"
 netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
 netutils "k8s.io/utils/net"
@@ -115,9 +114,6 @@ var tcpServerPort int32
 var udpServerPort int32
 func TestMain(m *testing.M) {
-// Don't handle panics
-runtime.ReallyCrash = true
 // TCP setup.
 tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 w.WriteHeader(http.StatusOK)

View File

@@ -27,9 +27,10 @@ import (
 )
 var (
-// ReallyCrash controls the behavior of HandleCrash and now defaults
-// true. It's still exposed so components can optionally set to false
-// to restore prior behavior.
+// ReallyCrash controls the behavior of HandleCrash and defaults to
+// true. It's exposed so components can optionally set to false
+// to restore prior behavior. This flag is mostly used for tests to validate
+// crash conditions.
 ReallyCrash = true
 )
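
The updated comment says ReallyCrash controls HandleCrash and now exists mainly so tests can validate crash conditions. Below is a minimal, self-contained sketch of how a HandleCrash-style helper consults such a flag; the names used (reallyCrash, panicHandlers, handleCrashSketch) are illustrative stand-ins, not the actual apimachinery source.

package main

import "log"

// Illustrative stand-ins for the package-level state in
// k8s.io/apimachinery/pkg/util/runtime; the names here are hypothetical.
var (
	reallyCrash   = true
	panicHandlers = []func(interface{}){
		func(r interface{}) { log.Printf("observed a panic: %v", r) },
	}
)

// handleCrashSketch mirrors the shape of HandleCrash: recover the panic,
// run the registered handlers plus any additional ones, then re-panic
// only when reallyCrash is true.
func handleCrashSketch(additionalHandlers ...func(interface{})) {
	if r := recover(); r != nil {
		for _, fn := range panicHandlers {
			fn(r)
		}
		for _, fn := range additionalHandlers {
			fn(r)
		}
		if reallyCrash {
			panic(r)
		}
	}
}

func main() {
	defer handleCrashSketch()
	panic("boom") // logged by the handler, then re-raised because reallyCrash is true
}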

View File

@@ -41,8 +41,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
 // the client sees an interrupted response but the server doesn't log
 // an error, panic with the value ErrAbortHandler.
 //
-// Note that the ReallyCrash variable controls the behaviour of the HandleCrash function
-// So it might actually crash, after calling the handlers
+// Note that HandleCrash function is actually crashing, after calling the handlers
 if info, err := resolver.NewRequestInfo(req); err != nil {
 metrics.RecordRequestAbort(req, nil)
 } else {
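
The surrounding comment documents the intended abort path: a handler that wants to interrupt a response without the server logging an error should panic with ErrAbortHandler. Below is a minimal sketch using the standard library's http.ErrAbortHandler sentinel; the handler and route are hypothetical.

package main

import "net/http"

// streamHandler is a hypothetical handler that aborts mid-response. Both
// net/http and the apiserver's panic-recovery filter treat a panic with
// http.ErrAbortHandler as a deliberate abort rather than a crash to log.
func streamHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	if _, err := w.Write([]byte("partial body")); err != nil {
		// The client went away; stop the handler without an error log.
		panic(http.ErrAbortHandler)
	}
	// ... continue writing the rest of the response ...
}

func main() {
	http.HandleFunc("/stream", streamHandler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}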

View File

@@ -39,7 +39,6 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/component-base/logs"
 "k8s.io/component-base/version"
@@ -98,7 +97,6 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
 // generated in this directory, and cluster logs will also be saved.
 // This function is called on each Ginkgo node in parallel mode.
 func RunE2ETests(t *testing.T) {
-runtimeutils.ReallyCrash = true
 logs.InitLogs()
 defer logs.FlushLogs()