Merge pull request #66224 from nikhita/fix-scheduler-panic
Automatic merge from submit-queue (batch tested with PRs 66203, 66224). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).
scheduler: fix panic while removing node from imageStates cache
Currently, when I run `hack/local-up-cluster.sh`, the scheduler encounters a panic. From `/tmp/kube-scheduler.log`:
```
panic: runtime error: invalid memory address or nil pointer dereference [recovered]
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x15e9988]
goroutine 55 [running]:
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime.HandleCrash(0x0, 0x0, 0x0)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:58 +0x107
panic(0x4242880, 0x877d870)
/usr/local/go/src/runtime/panic.go:502 +0x229
k8s.io/kubernetes/pkg/scheduler/cache.(*schedulerCache).removeNodeImageStates(0xc4203dfe50, 0xc420ae3b80)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/scheduler/cache/cache.go:510 +0xe8
k8s.io/kubernetes/pkg/scheduler/cache.(*schedulerCache).UpdateNode(0xc4203dfe50, 0xc420ae3b80, 0xc420415340, 0x0, 0x0)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/scheduler/cache/cache.go:442 +0xcd
k8s.io/kubernetes/pkg/scheduler/factory.(*configFactory).updateNodeInCache(0xc420d2ca00, 0x4b680c0, 0xc420ae3b80, 0x4b680c0, 0xc420415340)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/scheduler/factory/factory.go:794 +0x9a
k8s.io/kubernetes/pkg/scheduler/factory.(*configFactory).(k8s.io/kubernetes/pkg/scheduler/factory.updateNodeInCache)-fm(0x4b680c0, 0xc420ae3b80, 0x4b680c0, 0xc420415340)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/scheduler/factory/factory.go:248 +0x52
k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.ResourceEventHandlerFuncs.OnUpdate(0xc4209fc8f0, 0xc4209fc900, 0xc4209fc910, 0x4b680c0, 0xc420ae3b80, 0x4b680c0, 0xc420415340)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache/controller.go:202 +0x5d
k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.(*processorListener).run.func1.1(0x42cf8f, 0xc4215035a0, 0x0)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache/shared_informer.go:552 +0x18a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ExponentialBackoff(0x989680, 0x3ff0000000000000, 0x3fb999999999999a, 0x5, 0xc4214addf0, 0x42cad9, 0xc421598f30)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:203 +0x9c
k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.(*processorListener).run.func1()
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache/shared_informer.go:548 +0x81
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.JitterUntil.func1(0xc421503768)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:133 +0x54
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.JitterUntil(0xc4214adf68, 0xdf8475800, 0x0, 0x40fdd01, 0xc4215d2360)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:134 +0xbd
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Until(0xc421503768, 0xdf8475800, 0xc4215d2360)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:88 +0x4d
k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.(*processorListener).run(0xc420187100)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache/shared_informer.go:546 +0x78
k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.(*processorListener).(k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache.run)-fm()
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/client-go/tools/cache/shared_informer.go:390 +0x2a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.(*Group).Start.func1(0xc4209b4840, 0xc42025e370)
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:71 +0x4f
created by k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.(*Group).Start
/home/nraghunath/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:69 +0x62
```
https://github.com/kubernetes/kubernetes/pull/65745 was merged recently and introduced
c861ceb41a/pkg/scheduler/cache/cache.go (L506-L510)
If `!ok`, i.e. `state` is nil, `state.nodes` dereferences a nil pointer and panics.
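
A minimal, self-contained sketch of the failing pattern (using a hypothetical `imageState` type and hypothetical image/node names, not the scheduler's real ones): the `len(state.nodes)` check sits outside the `if ok` block, so it still runs when the lookup misses and `state` is nil. The diff below nests the check inside the `if ok` block to avoid this.

```go
package main

import "fmt"

// imageState is a hypothetical stand-in for the scheduler's per-image cache entry.
type imageState struct {
	nodes map[string]struct{} // set of node names that have this image
}

func main() {
	imageStates := map[string]*imageState{} // empty cache: lookups return a nil *imageState

	state, ok := imageStates["nginx:latest"]
	if ok {
		delete(state.nodes, "node-1")
	}
	// BUG: this runs even when ok is false; state is nil, so state.nodes is a
	// nil pointer dereference and the program panics here.
	if len(state.nodes) == 0 {
		fmt.Println("image unused, delete it from imageStates")
	}
}
```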
**Release note**:
```release-note
NONE
```
This commit is contained in:
commit 72440a10e9

pkg/scheduler/cache/cache.go (vendored): 12 changed lines
```diff
@@ -506,12 +506,12 @@ func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) {
 			state, ok := cache.imageStates[name]
 			if ok {
 				state.nodes.Delete(node.Name)
-			}
-			if len(state.nodes) == 0 {
-				// Remove the unused image to make sure the length of
-				// imageStates represents the total number of different
-				// images on all nodes
-				delete(cache.imageStates, name)
+				if len(state.nodes) == 0 {
+					// Remove the unused image to make sure the length of
+					// imageStates represents the total number of different
+					// images on all nodes
+					delete(cache.imageStates, name)
+				}
 			}
 		}
 	}
```
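
For comparison, the same hypothetical snippet as above with the length check nested inside the `if ok` block, mirroring the patch; a missed lookup now falls through without ever touching `state.nodes`.

```go
package main

import "fmt"

// imageState mirrors the hypothetical stand-in used in the earlier sketch.
type imageState struct {
	nodes map[string]struct{}
}

func main() {
	imageStates := map[string]*imageState{} // empty cache, as before

	state, ok := imageStates["nginx:latest"]
	if ok {
		delete(state.nodes, "node-1")
		// Safe: state is guaranteed non-nil inside this block.
		if len(state.nodes) == 0 {
			delete(imageStates, "nginx:latest")
		}
	}
	fmt.Println("no panic; remaining imageStates:", len(imageStates))
}
```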