Merge pull request #79937 from mm4tt/deployment_improve
Deployment Controller - avoid unnecessary copying of pod objects in getPodMapForDeployment
Commit abe94ce9b3
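
For context, here is a minimal, self-contained sketch of why the change matters. The types below are simplified stand-ins, not the real v1.Pod/v1.PodList (the real structs carry many more fields, which makes per-pod copies correspondingly more expensive): appending *pod into a PodList's Items slice copies the whole Pod struct for every pod, while a map of pod pointers only stores addresses of objects already held in the informer cache.

```go
package main

import "fmt"

// Pod and PodList are simplified stand-ins for v1.Pod and v1.PodList.
type Pod struct {
	Name   string
	Labels map[string]string
	// ... the real v1.Pod has many more fields.
}

type PodList struct {
	Items []Pod
}

func main() {
	// Pods as they would sit in the informer cache.
	cached := []*Pod{{Name: "rs1-pod"}, {Name: "pod4"}}

	// Old shape (map[types.UID]*v1.PodList): every append copies a full Pod value.
	oldMap := map[string]*PodList{"rs1": {}}
	for _, pod := range cached {
		oldMap["rs1"].Items = append(oldMap["rs1"].Items, *pod) // value copy per pod
	}

	// New shape (map[types.UID][]*v1.Pod): only pointers into the cache are stored.
	newMap := map[string][]*Pod{"rs1": {}}
	for _, pod := range cached {
		newMap["rs1"] = append(newMap["rs1"], pod) // no copy
	}

	fmt.Println(len(oldMap["rs1"].Items), len(newMap["rs1"])) // 2 2
}
```

The trade-off, spelled out in the NOTE comment added in the diff below, is that callers now share the cached objects and must treat them as read-only.
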
@@ -366,7 +366,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
         }
         numPods := 0
         for _, podList := range podMap {
-            numPods += len(podList.Items)
+            numPods += len(podList)
         }
         if numPods == 0 {
             dc.enqueueDeployment(d)
@@ -525,7 +525,9 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment)
 //
 // It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
 // according to the Pod's ControllerRef.
-func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) {
+// NOTE: The pod pointers returned by this method point to the pod objects in the cache and thus
+// shouldn't be modified in any way.
+func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID][]*v1.Pod, error) {
     // Get all Pods that potentially belong to this Deployment.
     selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
     if err != nil {
@@ -536,9 +538,9 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
         return nil, err
     }
     // Group Pods by their controller (if it's in rsList).
-    podMap := make(map[types.UID]*v1.PodList, len(rsList))
+    podMap := make(map[types.UID][]*v1.Pod, len(rsList))
     for _, rs := range rsList {
-        podMap[rs.UID] = &v1.PodList{}
+        podMap[rs.UID] = []*v1.Pod{}
     }
     for _, pod := range pods {
         // Do not ignore inactive Pods because Recreate Deployments need to verify that no
@@ -548,8 +550,8 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
             continue
         }
         // Only append if we care about this UID.
-        if podList, ok := podMap[controllerRef.UID]; ok {
-            podList.Items = append(podList.Items, *pod)
+        if _, ok := podMap[controllerRef.UID]; ok {
+            podMap[controllerRef.UID] = append(podMap[controllerRef.UID], pod)
         }
     }
     return podMap, nil
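
Because the returned pod pointers now alias the informer cache, any caller that wants to mutate a pod has to copy it first. The sketch below shows that pattern; it assumes the k8s.io/api and k8s.io/apimachinery modules are available, and markPodsInspected plus the example.local annotation key are hypothetical, not part of this commit. DeepCopy is the standard generated helper on v1.Pod.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// markPodsInspected is a hypothetical consumer of the new map shape. It must not
// write through the cached pointers, so it deep-copies each pod before mutating.
func markPodsInspected(podMap map[types.UID][]*v1.Pod) map[types.UID][]*v1.Pod {
	out := make(map[types.UID][]*v1.Pod, len(podMap))
	for uid, pods := range podMap {
		for _, pod := range pods {
			p := pod.DeepCopy() // the cached object stays untouched
			if p.Annotations == nil {
				p.Annotations = map[string]string{}
			}
			p.Annotations["example.local/inspected"] = "true"
			out[uid] = append(out[uid], p)
		}
	}
	return out
}

func main() {
	// Stands in for a pod held in the informer cache.
	cached := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "rs1-pod"}}
	podMap := map[types.UID][]*v1.Pod{"uid-1": {cached}}

	out := markPodsInspected(podMap)
	// Prints "true true": the cached pod was never annotated, only its copy was.
	fmt.Println(cached.Annotations == nil, out["uid-1"][0].Annotations["example.local/inspected"])
}
```
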
@@ -633,7 +633,7 @@ func TestGetPodMapForReplicaSets(t *testing.T) {
     }
     podCount := 0
     for _, podList := range podMap {
-        podCount += len(podList.Items)
+        podCount += len(podList)
     }
     if got, want := podCount, 3; got != want {
         t.Errorf("podCount = %v, want %v", got, want)
@@ -642,19 +642,19 @@ func TestGetPodMapForReplicaSets(t *testing.T) {
     if got, want := len(podMap), 2; got != want {
         t.Errorf("len(podMap) = %v, want %v", got, want)
     }
-    if got, want := len(podMap[rs1.UID].Items), 2; got != want {
+    if got, want := len(podMap[rs1.UID]), 2; got != want {
         t.Errorf("len(podMap[rs1]) = %v, want %v", got, want)
     }
     expect := map[string]struct{}{"rs1-pod": {}, "pod4": {}}
-    for _, pod := range podMap[rs1.UID].Items {
+    for _, pod := range podMap[rs1.UID] {
         if _, ok := expect[pod.Name]; !ok {
             t.Errorf("unexpected pod name for rs1: %s", pod.Name)
         }
     }
-    if got, want := len(podMap[rs2.UID].Items), 1; got != want {
+    if got, want := len(podMap[rs2.UID]), 1; got != want {
         t.Errorf("len(podMap[rs2]) = %v, want %v", got, want)
     }
-    if got, want := podMap[rs2.UID].Items[0].Name, "rs2-pod"; got != want {
+    if got, want := podMap[rs2.UID][0].Name, "rs2-pod"; got != want {
         t.Errorf("podMap[rs2] = [%v], want [%v]", got, want)
     }
 }

@@ -25,7 +25,7 @@ import (
 )
 
 // rolloutRecreate implements the logic for recreating a replica set.
-func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
+func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) error {
     // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
     newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
@@ -95,7 +95,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*app
 }
 
 // oldPodsRunning returns whether there are old pods running or any of the old ReplicaSets thinks that it runs pods.
-func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
+func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) bool {
     if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
         return true
     }
@@ -104,7 +104,7 @@ func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap ma
         if newRS != nil && newRS.UID == rsUID {
             continue
         }
-        for _, pod := range podList.Items {
+        for _, pod := range podList {
             switch pod.Status.Phase {
             case v1.PodFailed, v1.PodSucceeded:
                 // Don't count pods in terminal state.

@@ -88,7 +88,7 @@ func TestOldPodsRunning(t *testing.T) {
 
         newRS  *apps.ReplicaSet
         oldRSs []*apps.ReplicaSet
-        podMap map[types.UID]*v1.PodList
+        podMap map[types.UID][]*v1.Pod
 
         hasOldPodsRunning bool
     }{
@@ -115,9 +115,8 @@ func TestOldPodsRunning(t *testing.T) {
         {
             name:   "old RSs with zero status replicas but pods in terminal state are present",
             oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
-            podMap: map[types.UID]*v1.PodList{
+            podMap: map[types.UID][]*v1.Pod{
                 "uid-1": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodFailed,
@@ -130,15 +129,13 @@ func TestOldPodsRunning(t *testing.T) {
-                        },
                     },
                 },
             },
             hasOldPodsRunning: false,
         },
         {
             name:   "old RSs with zero status replicas but pod in unknown phase present",
             oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
-            podMap: map[types.UID]*v1.PodList{
+            podMap: map[types.UID][]*v1.Pod{
                 "uid-1": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodUnknown,
@@ -146,15 +143,13 @@ func TestOldPodsRunning(t *testing.T) {
-                        },
                     },
                 },
             },
             hasOldPodsRunning: true,
         },
         {
             name:   "old RSs with zero status replicas with pending pod present",
             oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
-            podMap: map[types.UID]*v1.PodList{
+            podMap: map[types.UID][]*v1.Pod{
                 "uid-1": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodPending,
@@ -162,15 +157,13 @@ func TestOldPodsRunning(t *testing.T) {
-                        },
                     },
                 },
             },
             hasOldPodsRunning: true,
         },
         {
             name:   "old RSs with zero status replicas with running pod present",
             oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
-            podMap: map[types.UID]*v1.PodList{
+            podMap: map[types.UID][]*v1.Pod{
                 "uid-1": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodRunning,
@@ -178,15 +171,13 @@ func TestOldPodsRunning(t *testing.T) {
-                        },
                     },
                 },
             },
             hasOldPodsRunning: true,
         },
         {
             name:   "old RSs with zero status replicas but pods in terminal state and pending are present",
             oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
-            podMap: map[types.UID]*v1.PodList{
+            podMap: map[types.UID][]*v1.Pod{
                 "uid-1": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodFailed,
@@ -198,12 +189,8 @@ func TestOldPodsRunning(t *testing.T) {
-                            },
                         },
                     },
                 },
-                "uid-2": {
-                    Items: []v1.Pod{},
-                },
+                "uid-2": {},
                 "uid-3": {
-                    Items: []v1.Pod{
                     {
                         Status: v1.PodStatus{
                             Phase: v1.PodPending,
@@ -211,7 +198,6 @@ func TestOldPodsRunning(t *testing.T) {
-                        },
                     },
                 },
             },
             hasOldPodsRunning: true,
         },
     }
@@ -232,14 +218,12 @@ func rsWithUID(uid string) *apps.ReplicaSet {
     return rs
 }
 
-func podMapWithUIDs(uids []string) map[types.UID]*v1.PodList {
-    podMap := make(map[types.UID]*v1.PodList)
+func podMapWithUIDs(uids []string) map[types.UID][]*v1.Pod {
+    podMap := make(map[types.UID][]*v1.Pod)
     for _, uid := range uids {
-        podMap[types.UID(uid)] = &v1.PodList{
-            Items: []v1.Pod{
+        podMap[types.UID(uid)] = []*v1.Pod{
             { /* supposedly a pod */ },
             { /* supposedly another pod */ },
-            },
         }
     }
     return podMap
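
One Go detail the updated podMapWithUIDs fixture relies on: inside a composite literal whose element type is a pointer, such as []*v1.Pod{...}, each element written as { ... } is shorthand for &v1.Pod{...}. A minimal stand-alone illustration, using a simplified stand-in type rather than the Kubernetes one:

```go
package main

import "fmt"

// Pod is a simplified stand-in for v1.Pod, used only to illustrate the literal syntax.
type Pod struct{ Name string }

func main() {
	// In a []*Pod literal, each element {...} is shorthand for &Pod{...},
	// so the slice holds non-nil pointers to freshly allocated Pods.
	pods := []*Pod{
		{Name: "supposedly a pod"},
		{Name: "supposedly another pod"},
	}
	fmt.Println(pods[0].Name, pods[1] != nil) // supposedly a pod true
}
```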