diff --git a/pkg/kubelet/cm/cpumanager/state/state_file.go b/pkg/kubelet/cm/cpumanager/state/state_file.go
index 90d16693dc3..603467c1c03 100644
--- a/pkg/kubelet/cm/cpumanager/state/state_file.go
+++ b/pkg/kubelet/cm/cpumanager/state/state_file.go
@@ -20,10 +20,11 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"os"
 	"sync"
+
+	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
 type stateFileData struct {
@@ -144,7 +145,6 @@ func (sf *stateFile) storeState() {
 	if err = ioutil.WriteFile(sf.stateFilePath, content, 0644); err != nil {
 		panic("[cpumanager] state file not written")
 	}
-	return
 }
 
 func (sf *stateFile) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
diff --git a/pkg/kubelet/cm/devicemanager/manager_test.go b/pkg/kubelet/cm/devicemanager/manager_test.go
index dc62a2302ba..aa169aefeb6 100644
--- a/pkg/kubelet/cm/devicemanager/manager_test.go
+++ b/pkg/kubelet/cm/devicemanager/manager_test.go
@@ -109,8 +109,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ := m.GetCapacity()
-	resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity := capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
@@ -125,8 +125,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
 
@@ -142,8 +142,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
 	p2.Stop()
@@ -178,8 +178,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 		t.FailNow()
 	}
 	capacity, allocatable, _ := m.GetCapacity()
-	resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity := capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(),
 		"Devices are not updated.")
@@ -194,8 +194,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
@@ -211,8 +211,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of previous registered should be removed")
 	p2.Stop()
diff --git a/pkg/kubelet/cm/topologymanager/topology_manager.go b/pkg/kubelet/cm/topologymanager/topology_manager.go
index 8e2452f4411..111e74dccdb 100644
--- a/pkg/kubelet/cm/topologymanager/topology_manager.go
+++ b/pkg/kubelet/cm/topologymanager/topology_manager.go
@@ -164,8 +164,8 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
 		// Get the TopologyHints from a provider.
 		hints := provider.GetTopologyHints(pod, container)
 
-		// If hints is nil, insert a single, preferred any-socket hint into allProviderHints.
-		if hints == nil || len(hints) == 0 {
+		// If hints is empty, insert a single, preferred any-socket hint into allProviderHints.
+		if len(hints) == 0 {
 			klog.Infof("[topologymanager] Hint Provider has no preference for socket affinity with any resource")
 			affinity, _ := socketmask.NewSocketMask()
 			affinity.Fill()
@@ -294,7 +294,7 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
 	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 		result := m.calculateAffinity(*pod, container)
 		admitPod := m.policy.CanAdmitPodResult(result.Preferred)
-		if admitPod.Admit == false {
+		if !admitPod.Admit {
 			return admitPod
 		}
 		c[container.Name] = result
diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go
index 9ebf5a66044..bbebc6a4092 100644
--- a/pkg/kubelet/config/config_test.go
+++ b/pkg/kubelet/config/config_test.go
@@ -382,7 +382,7 @@ func TestPodUpdateAnnotations(t *testing.T) {
 	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
 
 	pod := CreateValidPod("foo2", "new")
-	pod.Annotations = make(map[string]string, 0)
+	pod.Annotations = make(map[string]string)
 	pod.Annotations["kubernetes.io/blah"] = "blah"
 
 	clone := pod.DeepCopy()
@@ -411,7 +411,7 @@ func TestPodUpdateLabels(t *testing.T) {
 	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
 
 	pod := CreateValidPod("foo2", "new")
-	pod.Labels = make(map[string]string, 0)
+	pod.Labels = make(map[string]string)
 	pod.Labels["key"] = "value"
 
 	clone := pod.DeepCopy()
@@ -432,7 +432,7 @@ func TestPodRestore(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	pod := CreateValidPod("api-server", "kube-default")
-	pod.Annotations = make(map[string]string, 0)
+	pod.Annotations = make(map[string]string)
 	pod.Annotations["kubernetes.io/config.source"] = kubetypes.ApiserverSource
 	pod.Annotations[core.BootstrapCheckpointAnnotationKey] = "true"
diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go
index 42f51e91c31..8beb3c99835 100644
--- a/pkg/kubelet/config/http.go
+++ b/pkg/kubelet/config/http.go
@@ -106,7 +106,7 @@ func (s *sourceURL) extractFromURL() error {
 		return fmt.Errorf("zero-length data received from %v", s.url)
 	}
 	// Short circuit if the data has not changed since the last time it was read.
-	if bytes.Compare(data, s.data) == 0 {
+	if bytes.Equal(data, s.data) {
 		return nil
 	}
 	s.data = data
diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go
index 9141ad48c1d..e7e22e203c2 100644
--- a/pkg/kubelet/container/testing/fake_runtime.go
+++ b/pkg/kubelet/container/testing/fake_runtime.go
@@ -29,12 +29,12 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/flowcontrol"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-	. "k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/volume"
 )
 
 type FakePod struct {
-	Pod       *Pod
+	Pod       *kubecontainer.Pod
 	NetnsPath string
 }
 
@@ -44,14 +44,14 @@ type FakeRuntime struct {
 	CalledFunctions   []string
 	PodList           []*FakePod
 	AllPodList        []*FakePod
-	ImageList         []Image
+	ImageList         []kubecontainer.Image
 	APIPodStatus      v1.PodStatus
-	PodStatus         PodStatus
+	PodStatus         kubecontainer.PodStatus
 	StartedPods       []string
 	KilledPods        []string
 	StartedContainers []string
 	KilledContainers  []string
-	RuntimeStatus     *RuntimeStatus
+	RuntimeStatus     *kubecontainer.RuntimeStatus
 	VersionInfo       string
 	APIVersionInfo    string
 	RuntimeType       string
@@ -66,10 +66,10 @@ type FakeStreamingRuntime struct {
 	*FakeRuntime
 }
 
-var _ StreamingRuntime = &FakeStreamingRuntime{}
+var _ kubecontainer.StreamingRuntime = &FakeStreamingRuntime{}
 
 // FakeRuntime should implement Runtime.
-var _ Runtime = &FakeRuntime{}
+var _ kubecontainer.Runtime = &FakeRuntime{}
 
 type FakeVersion struct {
 	Version string
@@ -90,18 +90,18 @@ func (fv *FakeVersion) Compare(other string) (int, error) {
 }
 
 type podsGetter interface {
-	GetPods(bool) ([]*Pod, error)
+	GetPods(bool) ([]*kubecontainer.Pod, error)
 }
 
 type FakeRuntimeCache struct {
 	getter podsGetter
 }
 
-func NewFakeRuntimeCache(getter podsGetter) RuntimeCache {
+func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
 	return &FakeRuntimeCache{getter}
 }
 
-func (f *FakeRuntimeCache) GetPods() ([]*Pod, error) {
+func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) {
 	return f.getter.GetPods(false)
 }
 
@@ -177,7 +177,7 @@ func (f *FakeRuntime) Type() string {
 	return f.RuntimeType
 }
 
-func (f *FakeRuntime) Version() (Version, error) {
+func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -185,7 +185,7 @@ func (f *FakeRuntime) Version() (Version, error) {
 	return &FakeVersion{Version: f.VersionInfo}, f.Err
 }
 
-func (f *FakeRuntime) APIVersion() (Version, error) {
+func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -193,7 +193,7 @@ func (f *FakeRuntime) APIVersion() (Version, error) {
 	return &FakeVersion{Version: f.APIVersionInfo}, f.Err
 }
 
-func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
+func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -201,11 +201,11 @@ func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
 	return f.RuntimeStatus, f.StatusErr
 }
 
-func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
+func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
 	f.Lock()
 	defer f.Unlock()
 
-	var pods []*Pod
+	var pods []*kubecontainer.Pod
 
 	f.CalledFunctions = append(f.CalledFunctions, "GetPods")
 	if all {
@@ -220,7 +220,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
 	return pods, f.Err
 }
 
-func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) {
+func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -236,7 +236,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff
 	return
 }
 
-func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
+func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -274,7 +274,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er
 	return f.Err
 }
 
-func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
+func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -283,7 +283,7 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS
 	return &status, f.Err
 }
 
-func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -291,7 +291,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container
 	return f.Err
 }
 
-func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -299,7 +299,7 @@ func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSan
 	return image.Image, f.Err
 }
 
-func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
+func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -312,7 +312,7 @@ func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
 	return "", f.InspectErr
 }
 
-func (f *FakeRuntime) ListImages() ([]Image, error) {
+func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -320,7 +320,7 @@ func (f *FakeRuntime) ListImages() ([]Image, error) {
 	return f.ImageList, f.Err
 }
 
-func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
+func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -337,7 +337,7 @@ func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
 	return f.Err
 }
 
-func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -345,7 +345,7 @@ func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evi
 	return f.Err
 }
 
-func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
+func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -353,7 +353,7 @@ func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
 	return f.Err
 }
 
-func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
+func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -361,7 +361,7 @@ func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
 	return nil, f.Err
 }
 
-func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -369,7 +369,7 @@ func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdo
 	return &url.URL{Host: FakeHost}, f.Err
 }
 
-func (f *FakeStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -391,13 +391,13 @@ type FakeContainerCommandRunner struct {
 	Err    error
 
 	// actual values when invoked
-	ContainerID ContainerID
+	ContainerID kubecontainer.ContainerID
 	Cmd         []string
 }
 
-var _ ContainerCommandRunner = &FakeContainerCommandRunner{}
+var _ kubecontainer.ContainerCommandRunner = &FakeContainerCommandRunner{}
 
-func (f *FakeContainerCommandRunner) RunInContainer(containerID ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
+func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
 	// record invoked values
 	f.ContainerID = containerID
 	f.Cmd = cmd
diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go
index 55f7f8d3f08..45c4b485bf2 100644
--- a/pkg/kubelet/container/testing/runtime_mock.go
+++ b/pkg/kubelet/container/testing/runtime_mock.go
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 	"k8s.io/client-go/util/flowcontrol"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-	. "k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/volume"
 )
 
@@ -35,7 +35,7 @@ type Mock struct {
 	mock.Mock
 }
 
-var _ Runtime = new(Mock)
+var _ kubecontainer.Runtime = new(Mock)
 
 func (r *Mock) Start() error {
 	args := r.Called()
@@ -47,32 +47,32 @@ func (r *Mock) Type() string {
 	return args.Get(0).(string)
 }
 
-func (r *Mock) Version() (Version, error) {
+func (r *Mock) Version() (kubecontainer.Version, error) {
 	args := r.Called()
-	return args.Get(0).(Version), args.Error(1)
+	return args.Get(0).(kubecontainer.Version), args.Error(1)
 }
 
-func (r *Mock) APIVersion() (Version, error) {
+func (r *Mock) APIVersion() (kubecontainer.Version, error) {
 	args := r.Called()
-	return args.Get(0).(Version), args.Error(1)
+	return args.Get(0).(kubecontainer.Version), args.Error(1)
 }
 
-func (r *Mock) Status() (*RuntimeStatus, error) {
+func (r *Mock) Status() (*kubecontainer.RuntimeStatus, error) {
 	args := r.Called()
-	return args.Get(0).(*RuntimeStatus), args.Error(0)
+	return args.Get(0).(*kubecontainer.RuntimeStatus), args.Error(0)
 }
 
-func (r *Mock) GetPods(all bool) ([]*Pod, error) {
+func (r *Mock) GetPods(all bool) ([]*kubecontainer.Pod, error) {
 	args := r.Called(all)
-	return args.Get(0).([]*Pod), args.Error(1)
+	return args.Get(0).([]*kubecontainer.Pod), args.Error(1)
 }
 
-func (r *Mock) SyncPod(pod *v1.Pod, status *PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult {
+func (r *Mock) SyncPod(pod *v1.Pod, status *kubecontainer.PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) kubecontainer.PodSyncResult {
 	args := r.Called(pod, status, secrets, backOff)
-	return args.Get(0).(PodSyncResult)
+	return args.Get(0).(kubecontainer.PodSyncResult)
 }
 
-func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
+func (r *Mock) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
 	args := r.Called(pod, runningPod, gracePeriodOverride)
 	return args.Error(0)
 }
@@ -87,64 +87,64 @@ func (r *Mock) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
 	return args.Error(0)
 }
 
-func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
+func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
 	args := r.Called(uid, name, namespace)
-	return args.Get(0).(*PodStatus), args.Error(1)
+	return args.Get(0).(*kubecontainer.PodStatus), args.Error(1)
 }
 
-func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
+func (r *Mock) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
 	args := r.Called(containerID, cmd, stdin, stdout, stderr, tty)
 	return args.Error(0)
 }
 
-func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
+func (r *Mock) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
 	args := r.Called(containerID, stdin, stdout, stderr, tty)
 	return args.Error(0)
 }
 
-func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
 	args := r.Called(pod, containerID, logOptions, stdout, stderr)
 	return args.Error(0)
 }
 
-func (r *Mock) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (r *Mock) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	args := r.Called(image, pullSecrets)
 	return image.Image, args.Error(0)
 }
 
-func (r *Mock) GetImageRef(image ImageSpec) (string, error) {
+func (r *Mock) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	args := r.Called(image)
 	return args.Get(0).(string), args.Error(1)
 }
 
-func (r *Mock) ListImages() ([]Image, error) {
+func (r *Mock) ListImages() ([]kubecontainer.Image, error) {
 	args := r.Called()
-	return args.Get(0).([]Image), args.Error(1)
+	return args.Get(0).([]kubecontainer.Image), args.Error(1)
 }
 
-func (r *Mock) RemoveImage(image ImageSpec) error {
+func (r *Mock) RemoveImage(image kubecontainer.ImageSpec) error {
 	args := r.Called(image)
 	return args.Error(0)
 }
 
-func (r *Mock) PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error {
+func (r *Mock) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
 	args := r.Called(pod, port, stream)
 	return args.Error(0)
 }
 
-func (r *Mock) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (r *Mock) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
 	args := r.Called(gcPolicy, ready, evictNonDeletedPods)
 	return args.Error(0)
 }
 
-func (r *Mock) DeleteContainer(containerID ContainerID) error {
+func (r *Mock) DeleteContainer(containerID kubecontainer.ContainerID) error {
 	args := r.Called(containerID)
 	return args.Error(0)
 }
 
-func (r *Mock) ImageStats() (*ImageStats, error) {
+func (r *Mock) ImageStats() (*kubecontainer.ImageStats, error) {
 	args := r.Called()
-	return args.Get(0).(*ImageStats), args.Error(1)
+	return args.Get(0).(*kubecontainer.ImageStats), args.Error(1)
 }
 
 // UpdatePodCIDR fulfills the cri interface.
diff --git a/pkg/kubelet/dockershim/network/hairpin/hairpin.go b/pkg/kubelet/dockershim/network/hairpin/hairpin.go
index 3faefd1b86d..a3e455ff10c 100644
--- a/pkg/kubelet/dockershim/network/hairpin/hairpin.go
+++ b/pkg/kubelet/dockershim/network/hairpin/hairpin.go
@@ -37,7 +37,7 @@ const (
 )
 
 var (
-	ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)")
+	ethtoolOutputRegex = regexp.MustCompile(`peer_ifindex: (\d+)`)
 )
 
 func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {
diff --git a/pkg/kubelet/dockershim/network/hostport/fake_iptables.go b/pkg/kubelet/dockershim/network/hostport/fake_iptables.go
index e068e51b6a8..6edb2b27637 100644
--- a/pkg/kubelet/dockershim/network/hostport/fake_iptables.go
+++ b/pkg/kubelet/dockershim/network/hostport/fake_iptables.go
@@ -43,7 +43,7 @@ type fakeIPTables struct {
 
 func NewFakeIPTables() *fakeIPTables {
 	return &fakeIPTables{
-		tables: make(map[string]*fakeTable, 0),
+		tables: make(map[string]*fakeTable),
 		builtinChains: map[string]sets.String{
 			string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"),
 			string(utiliptables.TableNAT):    sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"),
@@ -203,7 +203,7 @@ func (f *fakeIPTables) EnsureRule(position utiliptables.RulePosition, tableName
 	ruleArgs := make([]string, 0)
 	for _, arg := range args {
 		// quote args with internal spaces (like comments)
-		if strings.Index(arg, " ") >= 0 {
+		if strings.Contains(arg, " ") {
 			arg = fmt.Sprintf("\"%s\"", arg)
 		}
 		ruleArgs = append(ruleArgs, arg)
diff --git a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go
index f3936bfbb60..1f0bec2a96b 100644
--- a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go
+++ b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go
@@ -351,7 +351,7 @@ func getExistingHostportIPTablesRules(iptables utiliptables.Interface) (map[util
 		}
 	}
 
-	for _, line := range strings.Split(string(iptablesData.Bytes()), "\n") {
+	for _, line := range strings.Split(iptablesData.String(), "\n") {
 		if strings.HasPrefix(line, fmt.Sprintf("-A %s", kubeHostportChainPrefix)) ||
 			strings.HasPrefix(line, fmt.Sprintf("-A %s", string(kubeHostportsChain))) {
 			existingHostportRules = append(existingHostportRules, line)
@@ -382,8 +382,6 @@ func filterRules(rules []string, filters []utiliptables.Chain) []string {
 // filterChains deletes all entries of filter chains from chain map
 func filterChains(chains map[utiliptables.Chain]string, filterChains []utiliptables.Chain) {
 	for _, chain := range filterChains {
-		if _, ok := chains[chain]; ok {
-			delete(chains, chain)
-		}
+		delete(chains, chain)
 	}
 }
diff --git a/pkg/kubelet/dockershim/network/hostport/hostport_manager_test.go b/pkg/kubelet/dockershim/network/hostport/hostport_manager_test.go
index 644f1d39683..9a9e151f52e 100644
--- a/pkg/kubelet/dockershim/network/hostport/hostport_manager_test.go
+++ b/pkg/kubelet/dockershim/network/hostport/hostport_manager_test.go
@@ -284,7 +284,7 @@ func TestHostportManager(t *testing.T) {
 	err := iptables.SaveInto(utiliptables.TableNAT, raw)
 	assert.NoError(t, err)
 
-	lines := strings.Split(string(raw.Bytes()), "\n")
+	lines := strings.Split(raw.String(), "\n")
 	expectedLines := map[string]bool{
 		`*nat`: true,
 		`:KUBE-HOSTPORTS - [0:0]`: true,
@@ -331,7 +331,7 @@ func TestHostportManager(t *testing.T) {
 	raw.Reset()
 	err = iptables.SaveInto(utiliptables.TableNAT, raw)
 	assert.NoError(t, err)
-	lines = strings.Split(string(raw.Bytes()), "\n")
+	lines = strings.Split(raw.String(), "\n")
 	remainingChains := make(map[string]bool)
 	for _, line := range lines {
 		if strings.HasPrefix(line, ":") {
diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
index a1402f5b5b3..6824cc5ac2d 100644
--- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
+++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
@@ -507,7 +507,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
 	portMappings, err := plugin.host.GetPodPortMappings(id.ID)
 	if err != nil {
 		errList = append(errList, err)
-	} else if portMappings != nil && len(portMappings) > 0 {
+	} else if len(portMappings) > 0 {
 		if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
 			Namespace: namespace,
 			Name:      name,
diff --git a/pkg/kubelet/eviction/memory_threshold_notifier_test.go b/pkg/kubelet/eviction/memory_threshold_notifier_test.go
index 8317e051fe6..042949e4401 100644
--- a/pkg/kubelet/eviction/memory_threshold_notifier_test.go
+++ b/pkg/kubelet/eviction/memory_threshold_notifier_test.go
@@ -135,8 +135,7 @@ func TestUpdateThreshold(t *testing.T) {
 			notifier := &MockCgroupNotifier{}
 			m := newTestMemoryThresholdNotifier(tc.evictionThreshold, notifierFactory, nil)
 			notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, tc.expectedThreshold.Value()).Return(notifier, tc.updateThresholdErr)
-			var events chan<- struct{}
-			events = m.events
+			var events chan<- struct{} = m.events
 			notifier.On("Start", events).Return()
 			err := m.UpdateThreshold(nodeSummary(tc.available, tc.workingSet, tc.usage, isAllocatableEvictionThreshold(tc.evictionThreshold)))
 			if err != nil && !tc.expectErr {
@@ -169,8 +168,7 @@ func TestStart(t *testing.T) {
 		wg.Done()
 	})
 	notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, int64(0)).Return(notifier, nil)
-	var events chan<- struct{}
-	events = m.events
+	var events chan<- struct{} = m.events
 	notifier.On("Start", events).Return()
 	notifier.On("Stop").Return()
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index bfd0b5ba56c..7b69501cf13 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -980,7 +980,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
 	require.NoError(t, err)
 
-	memCapacity, _ := updatedNode.Status.Capacity[v1.ResourceMemory]
+	memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
 	updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
 	assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
 
@@ -2001,8 +2001,6 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
 			utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
 			taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
 			"test unschedulable taint for TaintNodesByCondition")
-
-		return
 	})
 }
diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go
index cc1cec33f98..c81bef81c67 100644
--- a/pkg/kubelet/pleg/generic.go
+++ b/pkg/kubelet/pleg/generic.go
@@ -253,7 +253,7 @@ func (g *GenericPLEG) relist() {
 				needsReinspection[pid] = pod
 
 				continue
-			} else if _, found := g.podsToReinspect[pid]; found {
+			} else {
 				// this pod was in the list to reinspect and we did so because it had events, so remove it
 				// from the list (we don't want the reinspection code below to inspect it a second time in
 				// this relist execution)
diff --git a/pkg/kubelet/pluginmanager/plugin_manager_test.go b/pkg/kubelet/pluginmanager/plugin_manager_test.go
index 2e68ec0f83d..0bb703a01f2 100644
--- a/pkg/kubelet/pluginmanager/plugin_manager_test.go
+++ b/pkg/kubelet/pluginmanager/plugin_manager_test.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
-	pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher"
@@ -76,7 +75,6 @@ func (f *fakePluginHandler) DeRegisterPlugin(pluginName string) {
 	f.Lock()
 	defer f.Unlock()
 	f.deregisterPluginCalled = true
-	return
 }
 
 func init() {
@@ -143,7 +141,7 @@ func TestPluginRegistration(t *testing.T) {
 	// Add handler for device plugin
 	fakeHandler := newFakePluginHandler()
-	pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, fakeHandler)
+	pluginManager.AddHandler(registerapi.DevicePlugin, fakeHandler)
 
 	// Add a new plugin
 	socketPath := fmt.Sprintf("%s/plugin.sock", socketDir)
diff --git a/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go b/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go
index 32cd6fecd64..0a6adb5ae09 100644
--- a/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go
+++ b/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go
@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
-	pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
@@ -183,7 +182,7 @@ func Test_Run_Positive_Register(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
@@ -228,7 +227,7 @@ func Test_Run_Positive_RegisterThenUnregister(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
@@ -283,7 +282,7 @@ func Test_Run_Positive_ReRegister(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go
index afd60f5df13..5078bc808db 100644
--- a/pkg/kubelet/pod/mirror_client.go
+++ b/pkg/kubelet/pod/mirror_client.go
@@ -92,7 +92,6 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID)
 	}
 	klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid)
 	var GracePeriodSeconds int64
-	GracePeriodSeconds = 0
 	if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil {
 		// Unfortunately, there's no generic error for failing a precondition
 		if !(errors.IsNotFound(err) || errors.IsConflict(err)) {
diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go
index 56964890932..29dbe8ef5fd 100644
--- a/pkg/kubelet/pod_workers.go
+++ b/pkg/kubelet/pod_workers.go
@@ -241,9 +241,7 @@ func (p *podWorkers) removeWorker(uid types.UID) {
 		// If there is an undelivered work update for this pod we need to remove it
 		// since per-pod goroutine won't be able to put it to the already closed
 		// channel when it finishes processing the current work update.
-		if _, cached := p.lastUndeliveredWorkUpdate[uid]; cached {
-			delete(p.lastUndeliveredWorkUpdate, uid)
-		}
+		delete(p.lastUndeliveredWorkUpdate, uid)
 	}
 }
 func (p *podWorkers) ForgetWorker(uid types.UID) {
diff --git a/pkg/kubelet/types/pod_update.go b/pkg/kubelet/types/pod_update.go
index f2f4d93e541..9c3d676cd09 100644
--- a/pkg/kubelet/types/pod_update.go
+++ b/pkg/kubelet/types/pod_update.go
@@ -164,10 +164,7 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
 
 // IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
 func IsCriticalPodBasedOnPriority(priority int32) bool {
-	if priority >= scheduling.SystemCriticalPriority {
-		return true
-	}
-	return false
+	return priority >= scheduling.SystemCriticalPriority
 }
 
 // IsStaticPod returns true if the pod is a static pod.
diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go
index 8d58cdbb64e..db4e78e24bf 100644
--- a/pkg/kubelet/volumemanager/volume_manager_test.go
+++ b/pkg/kubelet/volumemanager/volume_manager_test.go
@@ -426,7 +426,6 @@ func delayClaimBecomesBound(
 		Phase: v1.ClaimBound,
 	}
 	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
-	return
 }
 
 func runVolumeManager(manager VolumeManager) chan struct{} {