Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 06:27:05 +00:00
Merge pull request #45574 from zhangxiaoyu-zidif/format-daemondset-for
Automatic merge from submit-queue (batch tested with PRs 44337, 45775, 45832, 45574, 45758)

daemoncontroller.go: reformat `for` loops

**What this PR does / why we need it**: Rewrites index-based `for` loops as `for _, x := range` loops, deleting the redundant element-copy statements and making the code cleaner.

**Release note**:

```release-note
NONE
```
This commit is contained in: c03e4952a3
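All seven hunks below apply the same mechanical refactor: an index-based loop whose first statement copies the element into a local (`for i := range xs { x := xs[i] ... }`) collapses into the value form `for _, x := range xs`. A minimal, self-contained sketch of the pattern, with an illustrative `daemonSet` type standing in for the controller's real types:

```go
package main

import "fmt"

type daemonSet struct{ name string }

func main() {
	dsList := []*daemonSet{{name: "fluentd"}, {name: "node-exporter"}}

	// Before: index the slice, then copy the element into a local.
	for i := range dsList {
		ds := dsList[i]
		fmt.Println(ds.name)
	}

	// After: the value form of range yields the element directly.
	for _, ds := range dsList {
		fmt.Println(ds.name)
	}
}
```

The slices in these hunks hold pointers (the loop bodies pass `ds` and `node` straight into `nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet, ...)`), so the value form copies only a pointer per iteration and the shorter loop costs nothing extra.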
daemoncontroller.go:

```diff
@@ -414,8 +414,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 		return
 	}
 	node := obj.(*v1.Node)
-	for i := range dsList {
-		ds := dsList[i]
+	for _, ds := range dsList {
 		_, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
@@ -439,8 +438,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 		return
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-	for i := range dsList {
-		ds := dsList[i]
+	for _, ds := range dsList {
 		_, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
 		if err != nil {
 			continue
@@ -538,8 +536,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 	}
 	var nodesNeedingDaemonPods, podsToDelete []string
 	var failedPodsObserved int
-	for i := range nodeList {
-		node := nodeList[i]
+	for _, node := range nodeList {
 		_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
@@ -555,8 +552,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 			// If a daemon pod failed, delete it
 			// If there's no daemon pods left on this node, we will create it in the next sync loop
 			var daemonPodsRunning []*v1.Pod
-			for i := range daemonPods {
-				pod := daemonPods[i]
+			for _, pod := range daemonPods {
 				if pod.Status.Phase == v1.PodFailed {
 					msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name)
 					glog.V(2).Infof(msg)
@@ -578,8 +574,8 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 			}
 		case !shouldContinueRunning && exists:
 			// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
-			for i := range daemonPods {
-				podsToDelete = append(podsToDelete, daemonPods[i].Name)
+			for _, pod := range daemonPods {
+				podsToDelete = append(podsToDelete, pod.Name)
 			}
 		}
 	}
@@ -716,8 +712,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 	}

 	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
-	for i := range nodeList {
-		node := nodeList[i]
+	for _, node := range nodeList {
 		wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			return err
@@ -881,8 +876,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 	if err != nil {
 		return false, false, false, err
 	}
-	for i := range podList {
-		pod := podList[i]
+	for _, pod := range podList {
 		if pod.Spec.NodeName != node.Name {
 			continue
 		}
```
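The rewrite is behavior-preserving here because every loop body only reads the element and passes it by value. Under the Go semantics in effect at the time (pre-1.22), the two forms are not always interchangeable: `x := xs[i]` declares a fresh variable on each iteration, while the range variable is a single variable reused across iterations, which matters as soon as the body captures it in a closure or takes its address. A small illustrative sketch of the difference (not controller code):

```go
package main

import "fmt"

func main() {
	xs := []int{1, 2, 3}

	// Index form: x is a fresh variable on every iteration, so each
	// closure captures its own copy.
	var a []func() int
	for i := range xs {
		x := xs[i]
		a = append(a, func() int { return x })
	}

	// Range form: before Go 1.22 the loop variable x is reused across
	// iterations, so every closure captures the same x and sees its
	// final value.
	var b []func() int
	for _, x := range xs {
		b = append(b, func() int { return x })
	}

	fmt.Println(a[0](), b[0]()) // pre-Go 1.22: "1 3"; Go 1.22+: "1 1"
}
```

Go 1.22 later changed `for ... range` to declare a fresh loop variable per iteration, eliminating this pitfall entirely.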