diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go
index 808734c6f4d..35cb99cf8c7 100644
--- a/cmd/kubelet/app/options/options.go
+++ b/cmd/kubelet/app/options/options.go
@@ -114,7 +114,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	s.NodeLabels = make(map[string]string)
 	bindableNodeLabels := utilconfig.ConfigurationMap(s.NodeLabels)
 	fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','.")
-	fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for a unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'")
+	fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for an unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'")
 	fs.Int32Var(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%")
 	fs.Int32Var(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%")
 	fs.Int32Var(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256")
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index a02d7fdb2ce..cc03ee55f55 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -304,9 +304,9 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
 	done := make(chan struct{})

 	if s.LockFilePath != "" {
-		glog.Infof("aquiring lock on %q", s.LockFilePath)
+		glog.Infof("acquiring lock on %q", s.LockFilePath)
 		if err := flock.Acquire(s.LockFilePath); err != nil {
-			return fmt.Errorf("unable to aquire file lock on %q: %v", s.LockFilePath, err)
+			return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
 		}
 		if s.ExitOnLockContention {
 			glog.Infof("watching for inotify events for: %v", s.LockFilePath)
diff --git a/pkg/kubelet/api/v1alpha1/runtime/api.pb.go b/pkg/kubelet/api/v1alpha1/runtime/api.pb.go
index 0bf67cd9752..09540859d5b 100644
--- a/pkg/kubelet/api/v1alpha1/runtime/api.pb.go
+++ b/pkg/kubelet/api/v1alpha1/runtime/api.pb.go
@@ -542,13 +542,13 @@ type PodSandboxConfig struct {
 	// By default the log of a container going into the LogDirectory will be
 	// hooked up to STDOUT and STDERR. However, the LogDirectory may contain
 	// binary log files with structured logging data from the individual
-	// containers. For example the files might be newline seperated JSON
+	// containers. For example, the files might be newline separated JSON
 	// structured logs, systemd-journald journal files, gRPC trace files, etc.
 	// E.g.,
 	// PodSandboxConfig.LogDirectory = `/var/log/pods//`
 	// ContainerConfig.LogPath = `containerName_Instance#.log`
 	//
-	// WARNING: Log managment and how kubelet should interface with the
+	// WARNING: Log management and how kubelet should interface with the
 	// container logs are under active discussion in
 	// https://issues.k8s.io/24677. There *may* be future change of direction
 	// for logging as the discussion carries on.
@@ -561,7 +561,7 @@ type PodSandboxConfig struct {
 	// aggregate cpu/memory resources limits of all containers).
 	// Note: On a Linux host, kubelet will create a pod-level cgroup and pass
 	// it as the cgroup parent for the PodSandbox. For some runtimes, this is
-	// sufficent. For others, e.g., hypervisor-based runtimes, explicit
+	// sufficient. For others, e.g., hypervisor-based runtimes, explicit
 	// resource limits for the sandbox are needed at creation time.
 	Resources *PodSandboxResources `protobuf:"bytes,6,opt,name=resources" json:"resources,omitempty"`
 	// Labels are key value pairs that may be used to scope and select individual resources.
@@ -1326,7 +1326,7 @@ type ContainerConfig struct {
 	// PodSandboxConfig.LogDirectory = `/var/log/pods//`
 	// ContainerConfig.LogPath = `containerName_Instance#.log`
 	//
-	// WARNING: Log managment and how kubelet should interface with the
+	// WARNING: Log management and how kubelet should interface with the
 	// container logs are under active discussion in
 	// https://issues.k8s.io/24677. There *may* be future change of direction
 	// for logging as the discussion carries on.
@@ -2777,7 +2777,7 @@ type ImageServiceClient interface {
 	ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error)
 	// ImageStatus returns the status of the image.
 	ImageStatus(ctx context.Context, in *ImageStatusRequest, opts ...grpc.CallOption) (*ImageStatusResponse, error)
-	// PullImage pulls a image with authentication config.
+	// PullImage pulls an image with authentication config.
 	PullImage(ctx context.Context, in *PullImageRequest, opts ...grpc.CallOption) (*PullImageResponse, error)
 	// RemoveImage removes the image.
 	// It should return success if the image has already been removed.
@@ -2835,7 +2835,7 @@ type ImageServiceServer interface {
 	ListImages(context.Context, *ListImagesRequest) (*ListImagesResponse, error)
 	// ImageStatus returns the status of the image.
 	ImageStatus(context.Context, *ImageStatusRequest) (*ImageStatusResponse, error)
-	// PullImage pulls a image with authentication config.
+	// PullImage pulls an image with authentication config.
 	PullImage(context.Context, *PullImageRequest) (*PullImageResponse, error)
 	// RemoveImage removes the image.
 	// It should return success if the image has already been removed.
diff --git a/pkg/kubelet/api/v1alpha1/runtime/api.proto b/pkg/kubelet/api/v1alpha1/runtime/api.proto
index 82b751dd270..27907e477b5 100644
--- a/pkg/kubelet/api/v1alpha1/runtime/api.proto
+++ b/pkg/kubelet/api/v1alpha1/runtime/api.proto
@@ -48,7 +48,7 @@ service ImageService {
     rpc ListImages(ListImagesRequest) returns (ListImagesResponse) {}
     // ImageStatus returns the status of the image.
     rpc ImageStatus(ImageStatusRequest) returns (ImageStatusResponse) {}
-    // PullImage pulls a image with authentication config.
+    // PullImage pulls an image with authentication config.
     rpc PullImage(PullImageRequest) returns (PullImageResponse) {}
     // RemoveImage removes the image.
    // It should return success if the image has already been removed.
@@ -166,13 +166,13 @@ message PodSandboxConfig {
    // By default the log of a container going into the LogDirectory will be
    // hooked up to STDOUT and STDERR. However, the LogDirectory may contain
    // binary log files with structured logging data from the individual
-    // containers. For example the files might be newline seperated JSON
+    // containers. For example, the files might be newline separated JSON
    // structured logs, systemd-journald journal files, gRPC trace files, etc.
    // E.g.,
    // PodSandboxConfig.LogDirectory = `/var/log/pods//`
    // ContainerConfig.LogPath = `containerName_Instance#.log`
    //
-    // WARNING: Log managment and how kubelet should interface with the
+    // WARNING: Log management and how kubelet should interface with the
    // container logs are under active discussion in
    // https://issues.k8s.io/24677. There *may* be future change of direction
    // for logging as the discussion carries on.
@@ -185,7 +185,7 @@ message PodSandboxConfig {
    // aggregate cpu/memory resources limits of all containers).
    // Note: On a Linux host, kubelet will create a pod-level cgroup and pass
    // it as the cgroup parent for the PodSandbox. For some runtimes, this is
-    // sufficent. For others, e.g., hypervisor-based runtimes, explicit
+    // sufficient. For others, e.g., hypervisor-based runtimes, explicit
    // resource limits for the sandbox are needed at creation time.
    optional PodSandboxResources resources = 6;
    // Labels are key value pairs that may be used to scope and select individual resources.
@@ -421,7 +421,7 @@ message ContainerConfig {
    // PodSandboxConfig.LogDirectory = `/var/log/pods//`
    // ContainerConfig.LogPath = `containerName_Instance#.log`
    //
-    // WARNING: Log managment and how kubelet should interface with the
+    // WARNING: Log management and how kubelet should interface with the
    // container logs are under active discussion in
    // https://issues.k8s.io/24677. There *may* be future change of direction
    // for logging as the discussion carries on.
diff --git a/pkg/kubelet/api/v1alpha1/stats/types.go b/pkg/kubelet/api/v1alpha1/stats/types.go
index a752ab30b79..bded04bf045 100644
--- a/pkg/kubelet/api/v1alpha1/stats/types.go
+++ b/pkg/kubelet/api/v1alpha1/stats/types.go
@@ -212,6 +212,6 @@ type UserDefinedMetric struct {
 	// The time at which these stats were updated.
 	Time unversioned.Time `json:"time"`
 	// Value of the metric. Float64s have 53 bit precision.
-	// We do not forsee any metrics exceeding that value.
+	// We do not foresee any metrics exceeding that value.
 	Value float64 `json:"value"`
 }
diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go
index 644ceb44a8a..59da8592831 100644
--- a/pkg/kubelet/cm/cgroup_manager_linux.go
+++ b/pkg/kubelet/cm/cgroup_manager_linux.go
@@ -105,7 +105,7 @@ var supportedSubsystems []subsystem = []subsystem{
 // setSupportedSubsytems sets cgroup resource limits only on the supported
 // subsytems. ie. cpu and memory. We don't use libcontainer's cgroup/fs/Set()
-// method as it dosn't allow us to skip updates on the devices cgroup
+// method as it doesn't allow us to skip updates on the devices cgroup
 // Allowing or denying all devices by writing 'a' to devices.allow or devices.deny is
 // not possible once the device cgroups has children. Once the pod level cgroup are
 // created under the QOS level cgroup we cannot update the QOS level device cgroup.
diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go
index c061714a5a2..0d229ea2ff5 100644
--- a/pkg/kubelet/config/http.go
+++ b/pkg/kubelet/config/http.go
@@ -49,7 +49,7 @@ func NewSourceURL(url string, header http.Header, nodeName string, period time.D
 		updates: updates,
 		data:    nil,
 		// Timing out requests leads to retries. This client is only used to
-		// read the the manifest URL passed to kubelet.
+		// read the manifest URL passed to kubelet.
 		client: &http.Client{Timeout: 10 * time.Second},
 	}
 	glog.V(1).Infof("Watching URL %s", url)
diff --git a/pkg/kubelet/container/interface.go b/pkg/kubelet/container/interface.go
index 198545ecd20..8b08a4f8f31 100644
--- a/pkg/kubelet/container/interface.go
+++ b/pkg/kubelet/container/interface.go
@@ -61,7 +61,7 @@ type PodSandboxConfig struct {
 	// aggregate cpu/memory resources limits of all containers).
 	// Note: On a Linux host, kubelet will create a pod-level cgroup and pass
 	// it as the cgroup parent for the PodSandbox. For some runtimes, this is
-	// sufficent. For others, e.g., hypervisor-based runtimes, explicit
+	// sufficient. For others, e.g., hypervisor-based runtimes, explicit
 	// resource limits for the sandbox are needed at creation time.
 	Resources PodSandboxResources
 	// Path to the directory on the host in which container log files are
@@ -69,13 +69,13 @@ type PodSandboxConfig struct {
 	// By default the Log of a container going into the LogDirectory will be
 	// hooked up to STDOUT and STDERR. However, the LogDirectory may contain
 	// binary log files with structured logging data from the individual
-	// containers. For example the files might be newline seperated JSON
+	// containers. For example, the files might be newline separated JSON
 	// structured logs, systemd-journald journal files, gRPC trace files, etc.
 	// E.g.,
 	// PodSandboxConfig.LogDirectory = `/var/log/pods//`
 	// ContainerConfig.LogPath = `containerName_Instance#.log`
 	//
-	// WARNING: Log managment and how kubelet should interface with the
+	// WARNING: Log management and how kubelet should interface with the
 	// container logs are under active discussion in
 	// https://issues.k8s.io/24677. There *may* be future change of direction
 	// for logging as the discussion carries on.
@@ -278,7 +278,7 @@ type ContainerConfig struct {
 	// PodSandboxConfig.LogDirectory = `/var/log/pods//`
 	// ContainerConfig.LogPath = `containerName_Instance#.log`
 	//
-	// WARNING: Log managment and how kubelet should interface with the
+	// WARNING: Log management and how kubelet should interface with the
 	// container logs are under active discussion in
 	// https://issues.k8s.io/24677. There *may* be future change of direction
 	// for logging as the discussion carries on.
diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go
index c407bd97828..bfb14ce4bfc 100644
--- a/pkg/kubelet/container/runtime.go
+++ b/pkg/kubelet/container/runtime.go
@@ -110,7 +110,7 @@ type Runtime interface {
 	// by all containers in the pod.
 	GetNetNS(containerID ContainerID) (string, error)
 	// Returns the container ID that represents the Pod, as passed to network
-	// plugins. For example if the runtime uses an infra container, returns
+	// plugins. For example, if the runtime uses an infra container, returns
 	// the infra container's ContainerID.
 	// TODO: Change ContainerID to a Pod ID, see GetNetNS()
 	GetPodContainerID(*Pod) (ContainerID, error)
diff --git a/pkg/kubelet/dockershim/docker_image.go b/pkg/kubelet/dockershim/docker_image.go
index 395b54870bc..8bdd24bdf9f 100644
--- a/pkg/kubelet/dockershim/docker_image.go
+++ b/pkg/kubelet/dockershim/docker_image.go
@@ -63,7 +63,7 @@ func (ds *dockerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.I
 	return images[0], nil
 }

-// PullImage pulls a image with authentication config.
+// PullImage pulls an image with authentication config.
 func (ds *dockerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error {
 	// TODO: add default tags for images or should this be done by kubelet?
 	return ds.client.PullImage(image.GetImage(),
diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go
index e71f93d96b6..8f84ae9f53a 100644
--- a/pkg/kubelet/dockertools/docker.go
+++ b/pkg/kubelet/dockertools/docker.go
@@ -250,7 +250,7 @@ func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
 }

 // Creates a name which can be reversed to identify both full pod name and container name.
-// This function returns stable name, unique name and an unique id.
+// This function returns stable name, unique name and a unique id.
 // Although rand.Uint32() is not really unique, but it's enough for us because error will
 // only occur when instances of the same container in the same pod have the same UID. The
 // chance is really slim.
diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go
index f4902049933..c55d3e6cc46 100644
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -72,7 +72,7 @@ const (
 	DockerType = "docker"

 	// https://docs.docker.com/engine/reference/api/docker_remote_api/
-	// docker verison should be at least 1.9.x
+	// docker version should be at least 1.9.x
 	minimumDockerAPIVersion = "1.21"

 	// Remote API version for docker daemon version v1.10
@@ -619,12 +619,12 @@ func (dm *DockerManager) runContainer(
 	_, containerName, cid := BuildDockerName(dockerName, container)
 	if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 {
 		// Because the PodContainerDir contains pod uid and container name which is unique enough,
-		// here we just add an unique container id to make the path unique for different instances
+		// here we just add a unique container id to make the path unique for different instances
 		// of the same container.
 		containerLogPath := path.Join(opts.PodContainerDir, cid)
 		fs, err := os.Create(containerLogPath)
 		if err != nil {
-			// TODO: Clean up the previouly created dir? return the error?
+			// TODO: Clean up the previously created dir? return the error?
 			glog.Errorf("Error on creating termination-log file %q: %v", containerLogPath, err)
 		} else {
 			fs.Close() // Close immediately; we're just doing a `touch` here
@@ -2304,7 +2304,7 @@ func (dm *DockerManager) isImageRoot(image string) (bool, error) {
 	return uid == 0, nil
 }

-// getUidFromUser splits the uid out of a uid:gid string.
+// getUidFromUser splits the uid out of an uid:gid string.
 func getUidFromUser(id string) string {
 	if id == "" {
 		return id
diff --git a/pkg/kubelet/eviction/types.go b/pkg/kubelet/eviction/types.go
index 053c47809da..50085439dd5 100644
--- a/pkg/kubelet/eviction/types.go
+++ b/pkg/kubelet/eviction/types.go
@@ -32,7 +32,7 @@ const (
 	SignalMemoryAvailable Signal = "memory.available"
 	// SignalNodeFsAvailable is amount of storage available on filesystem that kubelet uses for volumes, daemon logs, etc.
 	SignalNodeFsAvailable Signal = "nodefs.available"
-	// SignalImageFsAvailable is amount of storage available on filesystem that container runtime uses for for storing images and container writable layers.
+	// SignalImageFsAvailable is amount of storage available on filesystem that container runtime uses for storing images and container writable layers.
 	SignalImageFsAvailable Signal = "imagefs.available"
 )
diff --git a/pkg/kubelet/image_manager.go b/pkg/kubelet/image_manager.go
index 7d21892dd36..19e5513e4b5 100644
--- a/pkg/kubelet/image_manager.go
+++ b/pkg/kubelet/image_manager.go
@@ -65,7 +65,7 @@ type ImageGCPolicy struct {
 	// This is the lowest threshold we will try to garbage collect to.
 	LowThresholdPercent int

-	// Minimum age at which a image can be garbage collected.
+	// Minimum age at which an image can be garbage collected.
 	MinAge time.Duration
 }
diff --git a/pkg/kubelet/images/parallel_image_puller_test.go b/pkg/kubelet/images/parallel_image_puller_test.go
index f7e2efd8e6b..24414f64e66 100644
--- a/pkg/kubelet/images/parallel_image_puller_test.go
+++ b/pkg/kubelet/images/parallel_image_puller_test.go
@@ -56,7 +56,7 @@ func TestPuller(t *testing.T) {
 			pullerErr:       nil,
 			expectedErr:     []error{nil}},

-		{ // image present, dont pull
+		{ // image present, don't pull
 			containerImage:  "present_image",
 			policy:          api.PullIfNotPresent,
 			calledFunctions: []string{"IsImagePresent"},
diff --git a/pkg/kubelet/images/serialized_image_puller_test.go b/pkg/kubelet/images/serialized_image_puller_test.go
index 731850873c9..ef6eb1264b0 100644
--- a/pkg/kubelet/images/serialized_image_puller_test.go
+++ b/pkg/kubelet/images/serialized_image_puller_test.go
@@ -56,7 +56,7 @@ func TestSerializedPuller(t *testing.T) {
 			pullerErr:       nil,
 			expectedErr:     []error{nil}},

-		{ // image present, dont pull
+		{ // image present, don't pull
 			containerImage:  "present_image",
 			policy:          api.PullIfNotPresent,
 			calledFunctions: []string{"IsImagePresent"},
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 03c5c25191d..68c34844670 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1031,7 +1031,7 @@ func (kl *Kubelet) relabelVolumes(pod *api.Pod, volumes kubecontainer.VolumeMap)
 func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
 	// Kubernetes only mounts on /etc/hosts if :
 	// - container does not use hostNetwork and
-	// - container is not a infrastructure(pause) container
+	// - container is not an infrastructure(pause) container
 	// - container is not already mounting on /etc/hosts
 	// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
 	mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0
@@ -1041,7 +1041,7 @@ func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName,
 		mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
 		vol, ok := podVolumes[mount.Name]
 		if !ok {
-			glog.Warningf("Mount cannot be satisified for container %q, because the volume is missing: %q", container.Name, mount)
+			glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount)
 			continue
 		}
@@ -1449,7 +1449,7 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
 	if !useClusterFirstPolicy {
 		// When the kubelet --resolv-conf flag is set to the empty string, use
 		// DNS settings that override the docker default (which is to use
-		// /etc/resolv.conf) and effectivly disable DNS lookups. According to
+		// /etc/resolv.conf) and effectively disable DNS lookups. According to
 		// the bind documentation, the behavior of the DNS client library when
 		// "nameservers" are not specified is to "use the nameserver on the
 		// local machine". A nameserver setting of localhost is equivalent to
@@ -2373,7 +2373,7 @@ func (kl *Kubelet) PLEGHealthCheck() (bool, error) {
 }

 // validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
-// of the container. The previous flag will only return the logs for the the last terminated container, otherwise, the current
+// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
 // running container is preferred over a previous termination. If info about the container is not available then a specific
 // error is returned to the end user.
 func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go
index 1a34efa5b70..3a303487436 100644
--- a/pkg/kubelet/kubelet_network.go
+++ b/pkg/kubelet/kubelet_network.go
@@ -168,7 +168,7 @@ func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []s
 }

 // cleanupBandwidthLimits updates the status of bandwidth-limited containers
-// and ensures that only the the appropriate CIDRs are active on the node.
+// and ensures that only the appropriate CIDRs are active on the node.
 func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
 	if kl.shaper == nil {
 		return nil
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 80182d4416b..58621fc7c31 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -733,7 +733,7 @@ func (kl *Kubelet) setNodeVolumesInUseStatus(node *api.Node) {
 // setNodeStatus fills in the Status fields of the given Node, overwriting
 // any fields that are currently set.
 // TODO(madhusudancs): Simplify the logic for setting node conditions and
-// refactor the node status condtion code out to a different file.
+// refactor the node status condition code out to a different file.
 func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 	for _, f := range kl.setNodeStatusFuncs {
 		if err := f(node); err != nil {
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 03969a88749..c6a5a737dda 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -3785,7 +3785,7 @@ type testPodSyncHandler struct {
 	podsToEvict []*api.Pod
 	// the reason for the eviction
 	reason string
-	// the mesage for the eviction
+	// the message for the eviction
 	message string
 }
diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go
index 6ba3effc6ea..f43305bb2b2 100644
--- a/pkg/kubelet/lifecycle/handlers.go
+++ b/pkg/kubelet/lifecycle/handlers.go
@@ -81,7 +81,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod
 	}
 }

-// resolvePort attempts to turn a IntOrString port reference into a concrete port number.
+// resolvePort attempts to turn an IntOrString port reference into a concrete port number.
 // If portReference has an int value, it is treated as a literal, and simply returns that value.
 // If portReference is a string, an attempt is first made to parse it as an integer. If that fails,
 // an attempt is made to find a port with the same name in the container spec.
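The resolvePort comment in the lifecycle/handlers.go hunk above fully specifies the lookup order: a literal int, then a numeric string, then a named port from the container spec. Below is a minimal, self-contained Go sketch of that contract; the `ContainerPort` struct and `resolvePortSketch` name are simplified stand-ins for illustration, not the kubelet's actual types:

```go
package main

import (
	"fmt"
	"strconv"
)

// ContainerPort is a simplified stand-in for api.ContainerPort.
type ContainerPort struct {
	Name          string
	ContainerPort int
}

// resolvePortSketch mirrors the documented behavior: an int is taken
// literally; a string is parsed as an integer first, and only if that
// fails is it matched against named ports in the container spec.
func resolvePortSketch(portReference interface{}, ports []ContainerPort) (int, error) {
	switch ref := portReference.(type) {
	case int:
		return ref, nil // literal port number
	case string:
		if port, err := strconv.Atoi(ref); err == nil {
			return port, nil // numeric string, e.g. "8080"
		}
		for _, p := range ports {
			if p.Name == ref {
				return p.ContainerPort, nil // named port, e.g. "http"
			}
		}
		return -1, fmt.Errorf("couldn't find port %q in container spec", ref)
	default:
		return -1, fmt.Errorf("unsupported port reference %v", portReference)
	}
}

func main() {
	ports := []ContainerPort{{Name: "http", ContainerPort: 8080}}
	fmt.Println(resolvePortSketch("http", ports)) // 8080 <nil>
	fmt.Println(resolvePortSketch("9090", ports)) // 9090 <nil>
	fmt.Println(resolvePortSketch(80, ports))     // 80 <nil>
}
```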
diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go
index 27007b0c105..375de16cf1b 100644
--- a/pkg/kubelet/pleg/generic.go
+++ b/pkg/kubelet/pleg/generic.go
@@ -31,7 +31,7 @@ import (
 )

 // GenericPLEG is an extremely simple generic PLEG that relies solely on
-// periodic listing to discover container changes. It should be be used
+// periodic listing to discover container changes. It should be used
 // as temporary replacement for container runtimes do not support a proper
 // event generator yet.
 //
@@ -41,7 +41,7 @@ import (
 // container. In the case of relisting failure, the window may become longer.
 // Note that this assumption is not unique -- many kubelet internal components
 // rely on terminated containers as tombstones for bookkeeping purposes. The
-// garbage collector is implemented to work with such situtations. However, to
+// garbage collector is implemented to work with such situations. However, to
 // guarantee that kubelet can handle missing container events, it is
 // recommended to set the relist period short and have an auxiliary, longer
 // periodic sync in kubelet as the safety net.
diff --git a/pkg/kubelet/pod/pod_manager_test.go b/pkg/kubelet/pod/pod_manager_test.go
index 4ff42841cf2..1d3e61b303c 100644
--- a/pkg/kubelet/pod/pod_manager_test.go
+++ b/pkg/kubelet/pod/pod_manager_test.go
@@ -70,7 +70,7 @@ func TestGetSetPods(t *testing.T) {
 	podManager, _ := newTestManager()
 	podManager.SetPods(updates)

-	// Tests that all regular pods are recorded corrrectly.
+	// Tests that all regular pods are recorded correctly.
 	actualPods := podManager.GetPods()
 	if len(actualPods) != len(expectedPods) {
 		t.Errorf("expected %d pods, got %d pods; expected pods %#v, got pods %#v", len(expectedPods), len(actualPods),
diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go
index 2c493dde545..95715c76eb4 100644
--- a/pkg/kubelet/pod_workers.go
+++ b/pkg/kubelet/pod_workers.go
@@ -292,7 +292,7 @@ func killPodNow(podWorkers PodWorkers) eviction.KillPodFunc {
 			gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
 		}

-		// we timeout and return an error if we dont get a callback within a reasonable time.
+		// we timeout and return an error if we don't get a callback within a reasonable time.
 		// the default timeout is relative to the grace period (we settle on 2s to wait for kubelet->runtime traffic to complete in sigkill)
 		timeout := int64(gracePeriod + (gracePeriod / 2))
 		minTimeout := int64(2)
diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go
index 650532b6538..42172086d55 100644
--- a/pkg/kubelet/prober/prober.go
+++ b/pkg/kubelet/prober/prober.go
@@ -132,7 +132,7 @@ func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Pod
 }

 // buildHeaderMap takes a list of HTTPHeader string
-// pairs and returns a a populated string->[]string http.Header map.
+// pairs and returns a populated string->[]string http.Header map.
 func buildHeader(headerList []api.HTTPHeader) http.Header {
 	headers := make(http.Header)
 	for _, header := range headerList {
diff --git a/pkg/kubelet/prober/results/results_manager_test.go b/pkg/kubelet/prober/results/results_manager_test.go
index 9fc347acbe6..99fb0064ad9 100644
--- a/pkg/kubelet/prober/results/results_manager_test.go
+++ b/pkg/kubelet/prober/results/results_manager_test.go
@@ -56,7 +56,7 @@ func TestUpdates(t *testing.T) {
 		select {
 		case u := <-m.Updates():
 			if expected != u {
-				t.Errorf("Expected update %v, recieved %v: %s", expected, u, msg)
+				t.Errorf("Expected update %v, received %v: %s", expected, u, msg)
 			}
 		case <-time.After(wait.ForeverTestTimeout):
 			t.Errorf("Timed out waiting for update %v: %s", expected, msg)
diff --git a/pkg/kubelet/rkt/image.go b/pkg/kubelet/rkt/image.go
index e8be07e9c51..2b96ce5cc23 100644
--- a/pkg/kubelet/rkt/image.go
+++ b/pkg/kubelet/rkt/image.go
@@ -196,7 +196,7 @@ func (r *Runtime) getImageManifest(image string) (*appcschema.ImageManifest, err
 	return &manifest, json.Unmarshal(images[0].Manifest, &manifest)
 }

-// TODO(yifan): This is very racy, unefficient, and unsafe, we need to provide
+// TODO(yifan): This is very racy, inefficient, and unsafe, we need to provide
 // different namespaces. See: https://github.com/coreos/rkt/issues/836.
 func (r *Runtime) writeDockerAuthConfig(image string, credsSlice []credentialprovider.LazyAuthConfiguration, userConfigDir string) error {
 	if len(credsSlice) == 0 {
diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go
index 12e5e654cf6..31ef2462100 100644
--- a/pkg/kubelet/rkt/rkt.go
+++ b/pkg/kubelet/rkt/rkt.go
@@ -724,7 +724,7 @@ func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions,
 	// In docker runtime, the container log path contains the container ID.
 	// However, for rkt runtime, we cannot get the container ID before the
 	// the container is launched, so here we generate a random uuid to enable
-	// us to map a container's termination message path to an unique log file
+	// us to map a container's termination message path to a unique log file
 	// on the disk.
 	randomUID := uuid.NewUUID()
 	containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go
index bb434fb7197..56a8bd6fa6d 100644
--- a/pkg/kubelet/runonce_test.go
+++ b/pkg/kubelet/runonce_test.go
@@ -140,7 +140,7 @@ func TestRunOnce(t *testing.T) {
 	}
 	podManager.SetPods(pods)
 	// The original test here is totally meaningless, because fakeruntime will always return an empty podStatus. While
-	// the originial logic of isPodRunning happens to return true when podstatus is empty, so the test can always pass.
+	// the original logic of isPodRunning happens to return true when podstatus is empty, so the test can always pass.
 	// Now the logic in isPodRunning is changed, to let the test pass, we set the podstatus directly in fake runtime.
 	// This is also a meaningless test, because the isPodRunning will also always return true after setting this. However,
 	// because runonce is never used in kubernetes now, we should deprioritize the cleanup work.
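The killPodNow hunk in pkg/kubelet/pod_workers.go earlier in this patch encodes the callback wait as 1.5x the grace period with a 2-second minimum. Here is a standalone sketch of just that arithmetic; the function name is illustrative, and the final clamp against `minTimeout` is assumed from the declared minimum rather than shown in the hunk itself:

```go
package main

import (
	"fmt"
	"time"
)

// killTimeout reproduces the rule from the killPodNow comment: wait for
// the runtime callback for the grace period plus half again (1.5x), but
// never less than 2s, so kubelet->runtime SIGKILL traffic can complete.
func killTimeout(gracePeriodSeconds int64) time.Duration {
	timeout := gracePeriodSeconds + gracePeriodSeconds/2
	const minTimeout = int64(2)
	if timeout < minTimeout {
		timeout = minTimeout
	}
	return time.Duration(timeout) * time.Second
}

func main() {
	fmt.Println(killTimeout(30)) // 45s for a 30s grace period
	fmt.Println(killTimeout(0))  // 2s floor when the grace period is zero
}
```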
diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go
index 382f18d0035..0e29173a322 100644
--- a/pkg/kubelet/status/status_manager.go
+++ b/pkg/kubelet/status/status_manager.go
@@ -504,10 +504,10 @@ func (m *manager) needsReconcile(uid types.UID, status api.PodStatus) bool {
 }

 // We add this function, because apiserver only supports *RFC3339* now, which means that the timestamp returned by
-// apiserver has no nanosecond infromation. However, the timestamp returned by unversioned.Now() contains nanosecond,
+// apiserver has no nanosecond information. However, the timestamp returned by unversioned.Now() contains nanosecond,
 // so when we do comparison between status from apiserver and cached status, isStatusEqual() will always return false.
 // There is related issue #15262 and PR #15263 about this.
-// In fact, the best way to solve this is to do it on api side. However for now, we normalize the status locally in
+// In fact, the best way to solve this is to do it on api side. However, for now, we normalize the status locally in
 // kubelet temporarily.
 // TODO(random-liu): Remove timestamp related logic after apiserver supports nanosecond or makes it consistent.
 func normalizeStatus(pod *api.Pod, status *api.PodStatus) *api.PodStatus {
diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go
index 705c3b0039b..ce641a24be5 100644
--- a/pkg/kubelet/status/status_manager_test.go
+++ b/pkg/kubelet/status/status_manager_test.go
@@ -616,11 +616,11 @@ func TestSetContainerReadiness(t *testing.T) {
 	status = expectPodStatus(t, m, pod)
 	verifyReadiness("all ready", &status, true, true, true)

-	t.Log("Setting non-existant container readiness should fail.")
+	t.Log("Setting non-existent container readiness should fail.")
 	m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
 	verifyUpdates(t, m, 0)
 	status = expectPodStatus(t, m, pod)
-	verifyReadiness("ignore non-existant", &status, true, true, true)
+	verifyReadiness("ignore non-existent", &status, true, true, true)
 }

 func TestSyncBatchCleanupVersions(t *testing.T) {
diff --git a/pkg/kubelet/util/cache/object_cache.go b/pkg/kubelet/util/cache/object_cache.go
index a87592122a8..05f336af13b 100644
--- a/pkg/kubelet/util/cache/object_cache.go
+++ b/pkg/kubelet/util/cache/object_cache.go
@@ -24,21 +24,21 @@ import (
 // ObjectCache is a simple wrapper of expiration cache that
 // 1. use string type key
-// 2. has a updater to get value directly if it is expired
+// 2. has an updater to get value directly if it is expired
 // 3. then update the cache
 type ObjectCache struct {
 	cache   expirationCache.Store
 	updater func() (interface{}, error)
 }

-// objectEntry is a object with string type key.
+// objectEntry is an object with string type key.
 type objectEntry struct {
 	key string
 	obj interface{}
 }

-// NewObjectCache creates ObjectCache with a updater.
-// updater returns a object to cache.
+// NewObjectCache creates ObjectCache with an updater.
+// updater returns an object to cache.
 func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache {
 	return &ObjectCache{
 		updater: f,
diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index 66ce288cc61..6aac183a7e7 100644
--- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -180,7 +180,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 		if runningContainers {
 			glog.V(5).Infof(
-				"Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore it will not be removed from volume manager.",
+				"Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
 				format.Pod(volumeToMount.Pod))
 			continue
 		}
@@ -369,7 +369,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 	return pvc.Spec.VolumeName, pvc.UID, nil
 }

-// getPVSpec fetches the PV object with the given name from the the API server
+// getPVSpec fetches the PV object with the given name from the API server
 // and returns a volume.Spec representing it.
 // An error is returned if the call to fetch the PV object fails.
 func (dswp *desiredStateOfWorldPopulator) getPVSpec(
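The normalizeStatus comment in the status_manager.go hunk above explains the underlying problem: RFC3339 serialization drops sub-second precision, so a nanosecond-bearing local timestamp never compares equal to its apiserver round-trip. A self-contained demonstration in plain Go (no Kubernetes types; this shows the effect, not the kubelet's exact normalization code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A locally generated timestamp carries nanoseconds...
	now := time.Now()

	// ...but an RFC3339 round-trip (what the apiserver stores) keeps
	// only whole seconds, so equality checks against the round-tripped
	// value fail unless the local value is normalized first.
	roundTripped, _ := time.Parse(time.RFC3339, now.Format(time.RFC3339))
	fmt.Println(now.Equal(roundTripped)) // false (except when ns happen to be zero)

	// Normalizing the local timestamp to second precision, in the spirit
	// of normalizeStatus, makes the two comparable again.
	fmt.Println(now.Truncate(time.Second).Equal(roundTripped)) // true
}
```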