Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-06 02:34:03 +00:00
Merge pull request #47416 from allencloud/simplify-if-else

Automatic merge from submit-queue

simplify if and else for code

Signed-off-by: allencloud <allen.sun@daocloud.io>

**What this PR does / why we need it**: This PR simplifies several if/else blocks, making the code a little cleaner.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes # NONE

**Special notes for your reviewer**: NONE

**Release note**:
```release-note
NONE
```
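The change applies the standard Go guard-clause refactor: when every branch of an if/else chain returns, the `else` arms can be dropped and the remaining code de-indented. A minimal sketch of the pattern with invented names, not code from this PR:

```go
package main

import "fmt"

// Before: each case is nested one level deeper in else arms.
func describeBefore(n int) string {
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	} else {
		return "positive"
	}
}

// After: every branch returns early, so no else is needed and
// the final case reads as the unindented happy path.
func describeAfter(n int) string {
	if n < 0 {
		return "negative"
	}
	if n == 0 {
		return "zero"
	}
	return "positive"
}

func main() {
	fmt.Println(describeBefore(-1), describeAfter(42)) // negative positive
}
```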
Commit: 58819b0204
```diff
@@ -426,7 +426,8 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
 	if kl.externalCloudProvider {
 		// We rely on the external cloud provider to supply the addresses.
 		return nil
-	} else if kl.cloud != nil {
+	}
+	if kl.cloud != nil {
 		instances, ok := kl.cloud.Instances()
 		if !ok {
 			return fmt.Errorf("failed to get instances from cloud provider")
```
```diff
@@ -603,20 +604,21 @@ func (kl *Kubelet) setNodeStatusVersionInfo(node *v1.Node) {
 	verinfo, err := kl.cadvisor.VersionInfo()
 	if err != nil {
 		glog.Errorf("Error getting version info: %v", err)
-	} else {
-		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
-		node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion
+		return
+	}
+
+	node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
+	node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion
 
-		runtimeVersion := "Unknown"
-		if runtimeVer, err := kl.containerRuntime.Version(); err == nil {
-			runtimeVersion = runtimeVer.String()
-		}
-		node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", kl.containerRuntime.Type(), runtimeVersion)
+	runtimeVersion := "Unknown"
+	if runtimeVer, err := kl.containerRuntime.Version(); err == nil {
+		runtimeVersion = runtimeVer.String()
+	}
+	node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", kl.containerRuntime.Type(), runtimeVersion)
 
-		node.Status.NodeInfo.KubeletVersion = version.Get().String()
-		// TODO: kube-proxy might be different version from kubelet in the future
-		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
-	}
+	node.Status.NodeInfo.KubeletVersion = version.Get().String()
+	// TODO: kube-proxy might be different version from kubelet in the future
+	node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
 }
 
 // Set daemonEndpoints for the node.
```
```diff
@@ -631,25 +633,27 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
 	containerImages, err := kl.imageManager.GetImageList()
 	if err != nil {
 		glog.Errorf("Error getting image list: %v", err)
-	} else {
-		// sort the images from max to min, and only set top N images into the node status.
-		sort.Sort(sliceutils.ByImageSize(containerImages))
-		if maxImagesInNodeStatus < len(containerImages) {
-			containerImages = containerImages[0:maxImagesInNodeStatus]
-		}
+		node.Status.Images = imagesOnNode
+		return
+	}
+	// sort the images from max to min, and only set top N images into the node status.
+	sort.Sort(sliceutils.ByImageSize(containerImages))
+	if maxImagesInNodeStatus < len(containerImages) {
+		containerImages = containerImages[0:maxImagesInNodeStatus]
+	}
 
-		for _, image := range containerImages {
-			names := append(image.RepoDigests, image.RepoTags...)
-			// Report up to maxNamesPerImageInNodeStatus names per image.
-			if len(names) > maxNamesPerImageInNodeStatus {
-				names = names[0:maxNamesPerImageInNodeStatus]
-			}
-			imagesOnNode = append(imagesOnNode, v1.ContainerImage{
-				Names:     names,
-				SizeBytes: image.Size,
-			})
-		}
-	}
+	for _, image := range containerImages {
+		names := append(image.RepoDigests, image.RepoTags...)
+		// Report up to maxNamesPerImageInNodeStatus names per image.
+		if len(names) > maxNamesPerImageInNodeStatus {
+			names = names[0:maxNamesPerImageInNodeStatus]
+		}
+		imagesOnNode = append(imagesOnNode, v1.ContainerImage{
+			Names:     names,
+			SizeBytes: image.Size,
+		})
+	}
+
 	node.Status.Images = imagesOnNode
 }
```
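One subtlety in the hunk above: the original code fell through to `node.Status.Images = imagesOnNode` even on error, so the new early-return branch must repeat that assignment before returning to preserve behavior. A sketch of the same behavior-preserving move, with invented types and names rather than kubelet APIs:

```go
package main

import (
	"errors"
	"fmt"
)

// Status and listImages are hypothetical stand-ins for illustration.
type Status struct{ Images []string }

func listImages(fail bool) ([]string, error) {
	if fail {
		return nil, errors.New("image list unavailable")
	}
	return []string{"nginx:latest", "redis:7"}, nil
}

func setImages(s *Status, fail bool) {
	var imagesOnNode []string
	images, err := listImages(fail)
	if err != nil {
		// The pre-refactor code still assigned Images (the nil slice)
		// on error, so the early return must do the same.
		s.Images = imagesOnNode
		return
	}
	imagesOnNode = append(imagesOnNode, images...)
	s.Images = imagesOnNode
}

func main() {
	var ok, bad Status
	setImages(&ok, false)
	setImages(&bad, true)
	fmt.Println(ok.Images, bad.Images) // [nginx:latest redis:7] []
}
```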
```diff
@@ -779,14 +783,12 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *v1.Node) {
 			condition.LastTransitionTime = currentTime
 			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasInsufficientMemory")
 		}
-	} else {
-		if condition.Status != v1.ConditionFalse {
-			condition.Status = v1.ConditionFalse
-			condition.Reason = "KubeletHasSufficientMemory"
-			condition.Message = "kubelet has sufficient memory available"
-			condition.LastTransitionTime = currentTime
-			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientMemory")
-		}
+	} else if condition.Status != v1.ConditionFalse {
+		condition.Status = v1.ConditionFalse
+		condition.Reason = "KubeletHasSufficientMemory"
+		condition.Message = "kubelet has sufficient memory available"
+		condition.LastTransitionTime = currentTime
+		kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientMemory")
 	}
 
 	if newCondition {
```
```diff
@@ -837,14 +839,12 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
 			condition.LastTransitionTime = currentTime
 			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure")
 		}
-	} else {
-		if condition.Status != v1.ConditionFalse {
-			condition.Status = v1.ConditionFalse
-			condition.Reason = "KubeletHasNoDiskPressure"
-			condition.Message = "kubelet has no disk pressure"
-			condition.LastTransitionTime = currentTime
-			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure")
-		}
+	} else if condition.Status != v1.ConditionFalse {
+		condition.Status = v1.ConditionFalse
+		condition.Reason = "KubeletHasNoDiskPressure"
+		condition.Message = "kubelet has no disk pressure"
+		condition.LastTransitionTime = currentTime
+		kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure")
 	}
 
 	if newCondition {
```
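The two condition hunks use the complementary simplification: an `else` whose body is a single `if` collapses into `else if`, removing one nesting level without changing semantics. A minimal illustrative sketch, with names invented for the example:

```go
package main

import "fmt"

// setPressure mirrors the shape of the kubelet condition update:
// an else containing only an if collapses into else if.
func setPressure(underPressure bool, status string) string {
	if underPressure {
		if status != "True" {
			status = "True"
		}
	} else if status != "False" { // was: } else { if status != "False" { ... } }
		status = "False"
	}
	return status
}

func main() {
	fmt.Println(setPressure(true, "False")) // True
	fmt.Println(setPressure(false, "True")) // False
}
```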