Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #42805 from deads2k/client-01-flake-debug
Automatic merge from submit-queue

add debugging to the client watch test

Adds debugging information for https://github.com/kubernetes/kubernetes/issues/42724. I suspect that the watch is closing early, but I'd like proof before I consider things like retrying the list and doing another watch to observe the delete. I'm not even sure that would satisfy the test; it seems like a flaky way to build the test. Why wouldn't we delete non-gracefully?

@kubernetes/sig-api-machinery-misc @caesarxuchao @wojtek-t: saw you just hit this, in case you wanted to take a quick look at the debugging I added.
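A minimal sketch, in plain Go, of the "retry the list and do another watch" fallback the author mentions considering. The list and startWatch parameters are hypothetical stand-ins for the generated-clientset List/Watch calls used by the e2e test; only the watch.Interface semantics (ResultChan, Stop, watch.Deleted) come from k8s.io/apimachinery/pkg/watch.

package client

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
)

// observeDeletionWithRetry waits for a Deleted event. If the watch channel
// closes early, it re-lists and opens a fresh watch instead of failing,
// up to maxRetries additional attempts.
func observeDeletionWithRetry(
	list func() (resourceVersion string, stillExists bool, err error), // hypothetical: re-list the object
	startWatch func(resourceVersion string) (watch.Interface, error), // hypothetical: watch from that resourceVersion
	timeout time.Duration,
	maxRetries int,
) error {
	deadline := time.After(timeout)
	for attempt := 0; attempt <= maxRetries; attempt++ {
		rv, stillExists, err := list()
		if err != nil {
			return err
		}
		if !stillExists {
			// The object disappeared while we were not watching.
			return nil
		}
		w, err := startWatch(rv)
		if err != nil {
			return err
		}
		watchClosed := false
		for !watchClosed {
			select {
			case event, ok := <-w.ResultChan():
				if !ok {
					// The watch ended early; re-list and start a new watch.
					watchClosed = true
					break
				}
				if event.Type == watch.Deleted {
					w.Stop()
					return nil
				}
			case <-deadline:
				w.Stop()
				return fmt.Errorf("timed out waiting to observe deletion")
			}
		}
	}
	return fmt.Errorf("watch closed %d time(s) without observing deletion", maxRetries+1)
}

As the PR body notes, this kind of retry would only paper over the early close; the patch below instead makes the early close visible in the test output.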
This commit is contained in commit 7b4bec038c.
@@ -100,12 +100,19 @@ func observeCreation(w watch.Interface) {
 }
 
 func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
+	// output to give us a duration to failure. Maybe we aren't getting the
+	// full timeout for some reason. My guess would be watch failure
+	framework.Logf("Starting to observe pod deletion")
 	deleted := false
 	timeout := false
 	timer := time.After(60 * time.Second)
 	for !deleted && !timeout {
 		select {
-		case event, _ := <-w.ResultChan():
+		case event, normal := <-w.ResultChan():
+			if !normal {
+				framework.Failf("The channel was closed unexpectedly")
+				return
+			}
 			if event.Type == watch.Deleted {
 				obj = event.Object
 				deleted = true
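The substantive change in the hunk is reading the second value returned by the channel receive (named normal here), so an early close of the watch is reported via framework.Failf instead of being misread as a zero-value event. For reference, a self-contained illustration of that comma-ok idiom using the fake watcher from the same watch package; the scenario and names are illustrative, not part of the patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	fw := watch.NewFake() // in-memory watch.Interface, used only for this illustration

	// Simulate the server side ending the watch before any Deleted event
	// arrives, e.g. because the underlying connection was dropped.
	fw.Stop() // closes ResultChan

	event, ok := <-fw.ResultChan()
	if !ok {
		// This is the condition the patch's `if !normal` branch detects.
		fmt.Println("watch channel closed unexpectedly")
		return
	}
	fmt.Println("received event:", event.Type)
}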