Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #4169 from brendandburns/shell_sucks
Fix a regression where we never cleared out failed nodes.
Commit: 7a13c2f0e3
@@ -179,7 +179,8 @@ func FilterActivePods(pods []api.Pod) []api.Pod {
 	var result []api.Pod
 	for _, value := range pods {
 		if api.PodSucceeded != value.Status.Phase &&
-			api.PodFailed != value.Status.Phase {
+			api.PodFailed != value.Status.Phase &&
+			api.PodUnknown != value.Status.Phase {
 			result = append(result, value)
 		}
 	}
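The one-line change above is the filtering half of the fix: pods whose phase is Unknown no longer count as active. A minimal, self-contained sketch of that behavior; the Pod/PodPhase types below are simplified stand-ins for illustration, not the real pkg/api definitions:

package main

import "fmt"

// Simplified stand-ins for the api types used in the hunk above; the real
// definitions live in the repository's pkg/api package and are richer.
type PodPhase string

const (
	PodRunning   PodPhase = "Running"
	PodSucceeded PodPhase = "Succeeded"
	PodFailed    PodPhase = "Failed"
	PodUnknown   PodPhase = "Unknown"
)

type PodStatus struct{ Phase PodPhase }

type Pod struct {
	Name   string
	Status PodStatus
}

// filterActivePods mirrors the patched FilterActivePods: a pod is "active"
// only if its phase is none of Succeeded, Failed, or (newly) Unknown.
func filterActivePods(pods []Pod) []Pod {
	var result []Pod
	for _, value := range pods {
		if PodSucceeded != value.Status.Phase &&
			PodFailed != value.Status.Phase &&
			PodUnknown != value.Status.Phase {
			result = append(result, value)
		}
	}
	return result
}

func main() {
	pods := []Pod{
		{Name: "a", Status: PodStatus{Phase: PodRunning}},
		{Name: "b", Status: PodStatus{Phase: PodFailed}},
		{Name: "c", Status: PodStatus{Phase: PodUnknown}},
	}
	// Only "a" survives; before this change, "c" (Unknown) would also have
	// been treated as active.
	fmt.Println(filterActivePods(pods))
}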
@@ -135,6 +135,12 @@ func (p *PodCache) getNodeStatus(name string) (*api.NodeStatus, error) {
 	return &node.Status, nil
 }
 
+func (p *PodCache) clearNodeStatus() {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	p.currentNodes = map[objKey]api.NodeStatus{}
+}
+
 // TODO: once Host gets moved to spec, this can take a podSpec + metadata instead of an
 // entire pod?
 func (p *PodCache) updatePodStatus(pod *api.Pod) error {
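The new helper assumes PodCache guards its currentNodes map with a mutex; the struct definition itself is outside this diff, so the sketch below uses simplified stand-in types just to show the reset-the-map-under-the-lock pattern:

package main

import (
	"fmt"
	"sync"
)

// objKey and NodeStatus stand in for the real types referenced by
// clearNodeStatus; they are not shown in this diff, so these are assumptions
// kept deliberately minimal.
type objKey struct{ namespace, name string }
type NodeStatus struct{ Healthy bool }

type PodCache struct {
	lock         sync.Mutex
	currentNodes map[objKey]NodeStatus
}

// clearNodeStatus drops every cached node entry by swapping in a fresh map.
// Taking the lock keeps the swap safe against concurrent cache users.
func (p *PodCache) clearNodeStatus() {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.currentNodes = map[objKey]NodeStatus{}
}

func main() {
	c := &PodCache{currentNodes: map[objKey]NodeStatus{
		{"", "machine"}: {Healthy: false}, // a stale "failed node" entry
	}}
	c.clearNodeStatus()
	fmt.Println(len(c.currentNodes)) // 0: the stale entry is gone
}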
@@ -221,6 +227,10 @@ func (p *PodCache) GarbageCollectPodStatus() {
 // calling again, or risk having new info getting clobbered by delayed
 // old info.
 func (p *PodCache) UpdateAllContainers() {
+	// TODO: this is silly, we should pro-actively update the pod status when
+	// the API server makes changes.
+	p.clearNodeStatus()
+
 	ctx := api.NewContext()
 	pods, err := p.pods.ListPods(ctx, labels.Everything())
 	if err != nil {
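The p.clearNodeStatus() call at the top of UpdateAllContainers is the regression fix named in the commit message: node statuses are wiped at the start of every refresh pass, so a node that failed on an earlier pass only reappears in the cache if it is observed again. A rough sketch of that cycle, reusing the simplified types from the previous example and a hypothetical fetchNodeStatus helper that is not part of the real code:

package main

import (
	"fmt"
	"sync"
)

// Same minimal stand-ins as in the previous sketch.
type objKey struct{ namespace, name string }
type NodeStatus struct{ Healthy bool }

type PodCache struct {
	lock         sync.Mutex
	currentNodes map[objKey]NodeStatus
}

func (p *PodCache) clearNodeStatus() {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.currentNodes = map[objKey]NodeStatus{}
}

// fetchNodeStatus is a hypothetical probe standing in for the real per-node
// status lookup performed while walking the pod list.
func fetchNodeStatus(name string) NodeStatus { return NodeStatus{Healthy: true} }

// updateAllContainers sketches the refresh cycle after this change: clear the
// node cache first, then repopulate it from fresh observations, so an entry
// for a failed node cannot outlive the pass unless it is observed as failed
// again.
func (p *PodCache) updateAllContainers(nodes []string) {
	p.clearNodeStatus()
	for _, name := range nodes {
		status := fetchNodeStatus(name)
		p.lock.Lock()
		p.currentNodes[objKey{"", name}] = status
		p.lock.Unlock()
	}
}

func main() {
	c := &PodCache{currentNodes: map[objKey]NodeStatus{
		{"", "machine"}: {Healthy: false}, // left over from a previous pass
	}}
	c.updateAllContainers([]string{"machine"})
	fmt.Println(c.currentNodes[objKey{"", "machine"}]) // {true}: refreshed
}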
@@ -254,6 +254,35 @@ func makeUnhealthyNode(name string) *api.Node {
 	}
 }
 
+func TestPodUpdateAllContainersClearsNodeStatus(t *testing.T) {
+	node := makeHealthyNode("machine", "1.2.3.5")
+	pod1 := makePod(api.NamespaceDefault, "foo", "machine", "bar")
+	pod2 := makePod(api.NamespaceDefault, "baz", "machine", "qux")
+	config := podCacheTestConfig{
+		kubeletContainerInfo: api.PodStatus{
+			Info: api.PodInfo{"bar": api.ContainerStatus{}}},
+		nodes: []api.Node{*node},
+		pods:  []api.Pod{*pod1, *pod2},
+	}
+	cache := config.Construct()
+
+	if len(cache.currentNodes) != 0 {
+		t.Errorf("unexpected node cache: %v", cache.currentNodes)
+	}
+	key := objKey{"", "machine"}
+	cache.currentNodes[key] = makeUnhealthyNode("machine").Status
+
+	cache.UpdateAllContainers()
+
+	if len(cache.currentNodes) != 1 {
+		t.Errorf("unexpected empty node cache: %v", cache.currentNodes)
+	}
+
+	if !reflect.DeepEqual(cache.currentNodes[key], node.Status) {
+		t.Errorf("unexpected status:\n%#v\nexpected:\n%#v\n", cache.currentNodes[key], node.Status)
+	}
+}
+
 func TestPodUpdateAllContainers(t *testing.T) {
 	pod1 := makePod(api.NamespaceDefault, "foo", "machine", "bar")
 	pod2 := makePod(api.NamespaceDefault, "baz", "machine", "qux")
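The test seeds the cache with an unhealthy status for "machine", runs UpdateAllContainers, and then checks that the cached entry matches the freshly observed healthy node, which is the scenario that regressed. Assuming it lives in the same package as PodCache (the file path is not shown in this diff), it can be run on its own with the standard Go tooling, for example:

	go test -run TestPodUpdateAllContainersClearsNodeStatus ./...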