Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 20:24:09 +00:00
fixup! Make scheduler cache generation number monotonic to avoid collision
This commit is contained in:
parent b90892fa95
commit 3cfed68c7a
@@ -992,6 +992,7 @@ func TestNodeOperators(t *testing.T) {
 			t.Errorf("Failed to find node %v in schedulercache.", node.Name)
 		}
 
+		// Generations are globally unique. We check in our unit tests that they are incremented correctly.
 		expected.generation = got.generation
 		if !reflect.DeepEqual(got, expected) {
 			t.Errorf("Failed to add node into schedulercache:\n got: %+v \nexpected: %+v", got, expected)
@@ -32,8 +32,8 @@ import (
 )
 
 var (
 	emptyResource = Resource{}
-	generation int64 = 0
+	generation int64
 )
 
 // NodeInfo is node level aggregated information.
@@ -78,9 +78,12 @@ func initializeNodeTransientInfo() nodeTransientInfo {
 	return nodeTransientInfo{AllocatableVolumesCount: 0, RequestedVolumes: 0}
 }
 
-func incrementGeneration() int64 {
-	atomic.AddInt64(&generation, 1)
-	return generation
+// nextGeneration: Let's make sure history never forgets the name...
+// Increments the generation number monotonically ensuring that generation numbers never collide.
+// Collision of the generation numbers would be particularly problematic if a node was deleted and
+// added back with the same name. See issue#63262.
+func nextGeneration() int64 {
+	return atomic.AddInt64(&generation, 1)
 }
 
 // nodeTransientInfo contains transient node information while scheduling.
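This hunk is the substance of the fixup. The removed incrementGeneration bumped the counter and then read it back in a separate step, so two concurrent callers could return the same number (and the plain read of generation was itself a data race); nextGeneration returns the result of atomic.AddInt64 directly, which is unique per call. Below is a minimal, self-contained sketch of the difference. It is not code from this repository, and the old pattern reads the counter with atomic.LoadInt64 so the demo stays race-detector clean while still exhibiting collisions.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var generation int64

// Old pattern: increment, then re-read in a separate step. Two concurrent
// callers can both observe the same final value and hand out colliding
// generations. (The original code used a plain read here, which is also a
// data race; LoadInt64 is used in this demo only to keep `go run -race` quiet.)
func incrementGeneration() int64 {
	atomic.AddInt64(&generation, 1)
	return atomic.LoadInt64(&generation)
}

// New pattern: AddInt64 returns the incremented value atomically, so every
// call gets a distinct, monotonically increasing number.
func nextGeneration() int64 {
	return atomic.AddInt64(&generation, 1)
}

// collisions calls next() from many goroutines and counts duplicate results.
func collisions(next func() int64, calls int) int {
	var mu sync.Mutex
	seen := make(map[int64]int)
	var wg sync.WaitGroup
	for i := 0; i < calls; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g := next()
			mu.Lock()
			seen[g]++
			mu.Unlock()
		}()
	}
	wg.Wait()
	dup := 0
	for _, n := range seen {
		if n > 1 {
			dup += n - 1
		}
	}
	return dup
}

func main() {
	// The old pattern will typically report a non-zero number of duplicates
	// under contention; the new one never can.
	fmt.Println("old pattern duplicates:", collisions(incrementGeneration, 10000))
	atomic.StoreInt64(&generation, 0)
	fmt.Println("new pattern duplicates:", collisions(nextGeneration, 10000))
}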
@@ -221,7 +224,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
 		nonzeroRequest:      &Resource{},
 		allocatableResource: &Resource{},
 		TransientInfo:       newTransientSchedulerInfo(),
-		generation:          incrementGeneration(),
+		generation:          nextGeneration(),
 		usedPorts:           make(util.HostPortInfo),
 	}
 	for _, pod := range pods {
@@ -329,7 +332,7 @@ func (n *NodeInfo) AllocatableResource() Resource {
 // SetAllocatableResource sets the allocatableResource information of given node.
 func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
 	n.allocatableResource = allocatableResource
-	n.generation = incrementGeneration()
+	n.generation = nextGeneration()
 }
 
 // Clone returns a copy of this node.
@@ -401,7 +404,7 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	// Consume ports when pods added.
 	n.updateUsedPorts(pod, true)
 
-	n.generation = incrementGeneration()
+	n.generation = nextGeneration()
 }
 
 // RemovePod subtracts pod information from this NodeInfo.
@@ -452,7 +455,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
 	// Release ports when remove Pods.
 	n.updateUsedPorts(pod, false)
 
-	n.generation = incrementGeneration()
+	n.generation = nextGeneration()
 
 	return nil
 }
@@ -509,7 +512,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
 		}
 	}
 	n.TransientInfo = newTransientSchedulerInfo()
-	n.generation = incrementGeneration()
+	n.generation = nextGeneration()
 	return nil
 }
 
@@ -525,7 +528,7 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
 	n.memoryPressureCondition = v1.ConditionUnknown
 	n.diskPressureCondition = v1.ConditionUnknown
 	n.pidPressureCondition = v1.ConditionUnknown
-	n.generation = incrementGeneration()
+	n.generation = nextGeneration()
 	return nil
 }
 
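All of the call sites above switch mechanically from incrementGeneration() to nextGeneration(). Collisions matter because the scheduler cache compares generation numbers to decide whether a NodeInfo changed since it was last copied out. The sketch below models that idea under stated assumptions: nodeSnapshot and syncNode are made-up names, not the actual cache API; the only point is that an equality-based "unchanged" check breaks if a node deleted and re-added with the same name can reuse an old generation (the scenario described in issue#63262).

package main

import "fmt"

type nodeInfo struct {
	name       string
	generation int64
}

// nodeSnapshot is a hypothetical stand-in for a cache snapshot that remembers
// the last generation it copied for each node name.
type nodeSnapshot struct {
	generations map[string]int64
}

// syncNode copies the node into the snapshot unless its generation looks unchanged.
func (s *nodeSnapshot) syncNode(n nodeInfo) (copied bool) {
	if last, ok := s.generations[n.name]; ok && n.generation == last {
		return false // "unchanged": the stale entry is kept
	}
	s.generations[n.name] = n.generation
	return true
}

func main() {
	s := &nodeSnapshot{generations: map[string]int64{}}
	fmt.Println(s.syncNode(nodeInfo{"node-1", 5})) // true: node seen for the first time

	// The node is deleted and re-added under the same name. If the generation
	// counter could collide, the fresh object might again carry generation 5...
	fmt.Println(s.syncNode(nodeInfo{"node-1", 5})) // false: stale data silently kept

	// ...whereas a globally monotonic, never-reused counter always yields a new value.
	fmt.Println(s.syncNode(nodeInfo{"node-1", 6})) // true: snapshot refreshed
}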