mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-03 09:22:44 +00:00

commit 49768134e5

Merge pull request #119754 from pbxqdown/kubelet-fix-typo

Fix some typos in kubelet component source code
@@ -204,7 +204,7 @@ const (
 )
 
 var (
-	// ContainerLogsDir can be overwrited for testing usage
+	// ContainerLogsDir can be overwritten for testing usage
 	ContainerLogsDir = DefaultContainerLogsDir
 	etcHostsPath     = getContainerEtcHostsPath()
 )
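The comment fixed here exists because ContainerLogsDir is a package-level var rather than a const, which is what lets tests overwrite it. A minimal sketch of that override pattern, with illustrative names rather than the kubelet's actual symbols:

package main

import "fmt"

// containerLogsDir is a var, not a const, so tests can swap it out.
var containerLogsDir = "/var/log/containers"

func logPath(containerID string) string {
	return containerLogsDir + "/" + containerID + ".log"
}

func main() {
	fmt.Println(logPath("abc123")) // uses the production default

	// A test can point the var at a temp location and restore it afterwards.
	orig := containerLogsDir
	containerLogsDir = "/tmp/test-logs"
	fmt.Println(logPath("abc123")) // /tmp/test-logs/abc123.log
	containerLogsDir = orig
}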
@@ -2235,7 +2235,7 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 }
 
 // rejectPod records an event about the pod with the given reason and message,
-// and updates the pod to the failed phase in the status manage.
+// and updates the pod to the failed phase in the status manager.
 func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
 	kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
 	kl.statusManager.SetPodStatus(pod, v1.PodStatus{
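The corrected comment describes the reject flow: record a warning event, then push the pod to the Failed phase via the status manager. A self-contained sketch of that shape, using stand-in types rather than the kubelet's real recorder and statusManager:

package main

import "fmt"

// podStatus, recorder, and statusManager are toy stand-ins for the
// kubelet's real types; only the event-then-status ordering is the point.
type podStatus struct {
	Phase   string
	Reason  string
	Message string
}

type recorder interface {
	Eventf(podName, eventType, reason, message string)
}

type statusManager interface {
	SetPodStatus(podName string, status podStatus)
}

type logRecorder struct{}

func (logRecorder) Eventf(podName, eventType, reason, message string) {
	fmt.Printf("event %s on %s: %s: %s\n", eventType, podName, reason, message)
}

type memoryStatusManager struct{ statuses map[string]podStatus }

func (m *memoryStatusManager) SetPodStatus(podName string, status podStatus) {
	m.statuses[podName] = status
}

// rejectPod mirrors the helper's shape: emit the warning event first,
// then record the Failed phase in the status manager.
func rejectPod(r recorder, s statusManager, podName, reason, message string) {
	r.Eventf(podName, "Warning", reason, message)
	s.SetPodStatus(podName, podStatus{Phase: "Failed", Reason: reason, Message: message})
}

func main() {
	sm := &memoryStatusManager{statuses: map[string]podStatus{}}
	rejectPod(logRecorder{}, sm, "demo-pod", "OutOfMemory", "node is out of memory")
	fmt.Printf("stored status: %+v\n", sm.statuses["demo-pod"])
}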
@@ -477,7 +477,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod,
 	if err != nil {
 		return nil, nil, err
 	}
-	// nodename will be equals to hostname if SetHostnameAsFQDN is nil or false. If SetHostnameFQDN
+	// nodename will be equal to hostname if SetHostnameAsFQDN is nil or false. If SetHostnameFQDN
 	// is true and hostDomainName is defined, nodename will be the FQDN (hostname.hostDomainName)
 	nodename, err := util.GetNodenameForKernel(hostname, hostDomainName, pod.Spec.SetHostnameAsFQDN)
 	if err != nil {
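The corrected comment states the naming rule: nodename equals hostname unless SetHostnameAsFQDN is true and a host domain is defined, in which case the FQDN is used. A toy version of just that rule; the real util.GetNodenameForKernel also enforces kernel hostname limits, which this sketch omits:

package main

import "fmt"

// nodenameForKernel is a simplified stand-in for util.GetNodenameForKernel:
// it returns hostname.hostDomainName only when the FQDN flag is set and a
// domain is available, and the plain hostname otherwise.
func nodenameForKernel(hostname, hostDomainName string, setHostnameAsFQDN *bool) string {
	if setHostnameAsFQDN != nil && *setHostnameAsFQDN && hostDomainName != "" {
		return hostname + "." + hostDomainName // FQDN form
	}
	return hostname // default: nodename equals hostname
}

func main() {
	useFQDN := true
	fmt.Println(nodenameForKernel("web-0", "example.com", nil))      // web-0
	fmt.Println(nodenameForKernel("web-0", "example.com", &useFQDN)) // web-0.example.com
}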
@@ -981,7 +981,7 @@ func (kl *Kubelet) isAdmittedPodTerminal(pod *v1.Pod) bool {
 		return true
 	}
 	// a pod that has been marked terminal within the Kubelet is considered
-	// inactive (may have been rejected by Kubelet admision)
+	// inactive (may have been rejected by Kubelet admission)
 	if status, ok := kl.statusManager.GetPodStatus(pod.UID); ok {
 		if status.Phase == v1.PodSucceeded || status.Phase == v1.PodFailed {
 			return true
@@ -133,7 +133,7 @@ func (bus *DBusCon) ReloadLogindConf() error {
 	return nil
 }
 
-// MonitorShutdown detects the a node shutdown by watching for "PrepareForShutdown" logind events.
+// MonitorShutdown detects the node shutdown by watching for "PrepareForShutdown" logind events.
 // see https://www.freedesktop.org/wiki/Software/systemd/inhibit/ for more details.
 func (bus *DBusCon) MonitorShutdown() (<-chan bool, error) {
 	err := bus.SystemBus.AddMatchSignal(dbus.WithMatchInterface(logindInterface), dbus.WithMatchMember("PrepareForShutdown"), dbus.WithMatchObjectPath("/org/freedesktop/login1"))
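The line below the corrected comment subscribes to logind's "PrepareForShutdown" signal. A hedged sketch of the same subscription using github.com/godbus/dbus/v5 directly, without the kubelet's DBusCon wrapper; it assumes a Linux machine with a systemd system bus, and "org.freedesktop.login1.Manager" as the logind interface name:

package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	conn, err := dbus.SystemBus()
	if err != nil {
		panic(err)
	}
	// Match the same signal MonitorShutdown watches for.
	err = conn.AddMatchSignal(
		dbus.WithMatchInterface("org.freedesktop.login1.Manager"),
		dbus.WithMatchMember("PrepareForShutdown"),
		dbus.WithMatchObjectPath("/org/freedesktop/login1"),
	)
	if err != nil {
		panic(err)
	}

	signals := make(chan *dbus.Signal, 1)
	conn.Signal(signals)

	for sig := range signals {
		// Body[0] is true when shutdown is starting, false when cancelled.
		if active, ok := sig.Body[0].(bool); ok {
			fmt.Println("PrepareForShutdown:", active)
		}
	}
}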
@@ -43,7 +43,7 @@ import (
 // errors are simply logged and the goroutine is terminated without updating
 // actualStateOfWorld.
 type OperationExecutor interface {
-	// RegisterPlugin registers the given plugin using the a handler in the plugin handler map.
+	// RegisterPlugin registers the given plugin using a handler in the plugin handler map.
 	// It then updates the actual state of the world to reflect that.
 	RegisterPlugin(socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error
 
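The corrected comment says registration is dispatched through a handler in the plugin handler map, keyed by plugin type. A toy illustration of that dispatch, with made-up types in place of the real pluginwatcher/cache ones:

package main

import (
	"fmt"
	"time"
)

// pluginHandler and devicePluginHandler are illustrative stand-ins for
// cache.PluginHandler implementations; the lookup-then-delegate shape is
// the point.
type pluginHandler interface {
	RegisterPlugin(socketPath string, timestamp time.Time) error
}

type devicePluginHandler struct{}

func (devicePluginHandler) RegisterPlugin(socketPath string, timestamp time.Time) error {
	fmt.Printf("registered device plugin at %s (%s)\n", socketPath, timestamp.Format(time.RFC3339))
	return nil
}

// registerPlugin picks the handler for the plugin's type from the map and
// delegates registration to it, failing if no handler is known.
func registerPlugin(pluginType, socketPath string, handlers map[string]pluginHandler) error {
	h, ok := handlers[pluginType]
	if !ok {
		return fmt.Errorf("no handler for plugin type %q", pluginType)
	}
	return h.RegisterPlugin(socketPath, time.Now())
}

func main() {
	handlers := map[string]pluginHandler{"DevicePlugin": devicePluginHandler{}}
	if err := registerPlugin("DevicePlugin", "/var/lib/kubelet/plugins/demo.sock", handlers); err != nil {
		fmt.Println(err)
	}
}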
@@ -184,7 +184,7 @@ func TestPluginReRegistration(t *testing.T) {
 	// The updated plugin should be in the desired state of world cache
 	for i := 0; i < 10; i++ {
 		// Stop the plugin; the plugin should be removed from the desired state of world cache
-		// The plugin removel doesn't work when running the unit tests locally: event.Op of plugin watcher won't pick up the delete event
+		// The plugin removal doesn't work when running the unit tests locally: event.Op of plugin watcher won't pick up the delete event
 		require.NoError(t, p.Stop())
 		waitForUnregistration(t, pluginInfo.SocketPath, dsw)
 
@@ -111,7 +111,7 @@ func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time.
 	return results, err
 }
 
-// runPod runs a single pod and wait until all containers are running.
+// runPod runs a single pod and waits until all containers are running.
 func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Duration) error {
 	var isTerminal bool
 	delay := retryDelay
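The corrected comment says runPod waits until all containers are running, retrying with a delay. A rough sketch of that retry shape under a context deadline; runningCheck stands in for the kubelet's actual status probe, and the doubling backoff factor is an assumption for illustration:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runUntilRunning keeps probing until the check succeeds or the context
// expires, sleeping between attempts with a growing delay (assumed x2 here).
func runUntilRunning(ctx context.Context, retryDelay time.Duration, runningCheck func() bool) error {
	delay := retryDelay
	for !runningCheck() {
		select {
		case <-ctx.Done():
			return errors.New("timed out waiting for pod to be running")
		case <-time.After(delay):
			delay *= 2 // back off between attempts
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	attempts := 0
	err := runUntilRunning(ctx, 10*time.Millisecond, func() bool {
		attempts++
		return attempts >= 3 // "running" on the third check
	})
	fmt.Println("attempts:", attempts, "err:", err)
}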
@@ -533,7 +533,7 @@ func hasPodInitialized(pod *v1.Pod) bool {
 
 // initializedContainers returns all status except for suffix of containers that are in Waiting
 // state, which is the set of containers that have attempted to start at least once. If all containers
-// are Watiing, the first container is always returned.
+// are Waiting, the first container is always returned.
 func initializedContainers(containers []v1.ContainerStatus) []v1.ContainerStatus {
 	for i := len(containers) - 1; i >= 0; i-- {
 		if containers[i].State.Waiting == nil || containers[i].LastTerminationState.Terminated != nil {
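Since the fixed comment is the main documentation of this helper's edge case, a self-contained sketch of the suffix-trimming rule may help: walk from the end, drop containers that are still Waiting and never started, and if everything is Waiting keep only the first. The struct below is a stand-in for v1.ContainerStatus:

package main

import "fmt"

// containerStatus flattens the two fields the real loop inspects:
// Waiting mirrors State.Waiting != nil, and Restarted mirrors
// LastTerminationState.Terminated != nil.
type containerStatus struct {
	Name      string
	Waiting   bool
	Restarted bool
}

func initializedContainers(containers []containerStatus) []containerStatus {
	for i := len(containers) - 1; i >= 0; i-- {
		if !containers[i].Waiting || containers[i].Restarted {
			return containers[:i+1]
		}
	}
	// If all containers are Waiting, the first container is always returned.
	return containers[:1]
}

func main() {
	statuses := []containerStatus{
		{Name: "init-a"},                // started
		{Name: "init-b", Waiting: true}, // never started
		{Name: "init-c", Waiting: true}, // never started
	}
	for _, c := range initializedContainers(statuses) {
		fmt.Println(c.Name) // only init-a
	}
}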