Cleanup more static check issues (S1*,ST*)

Tim Allclair 2019-08-09 12:31:03 -07:00
parent 8a495cb5e4
commit a2c51674cf
21 changed files with 99 additions and 115 deletions
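These hunks are mechanical cleanups flagged by staticcheck (honnef.co/go/tools): S1xxx checks are code simplifications (the gosimple set) and ST1xxx checks are style rules (the stylecheck set). The commit message gives only the S1*/ST* prefixes, so the specific rule IDs suggested in the notes below are inferred from the shape of each change.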

View File

@@ -20,10 +20,11 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"os"
"sync"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type stateFileData struct {
@@ -144,7 +145,6 @@ func (sf *stateFile) storeState() {
if err = ioutil.WriteFile(sf.stateFilePath, content, 0644); err != nil {
panic("[cpumanager] state file not written")
}
return
}
func (sf *stateFile) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
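The dropped return is the pattern staticcheck reports as S1023: a bare return at the end of a function with no result values is redundant. A minimal runnable sketch (names are hypothetical, not from the commit):

package main

import "fmt"

// Before (flagged):
//
//	func store(state map[string]string) {
//		fmt.Println("storing", len(state), "entries")
//		return // S1023: redundant return statement
//	}

// After: the function simply falls off the end.
func store(state map[string]string) {
	fmt.Println("storing", len(state), "entries")
}

func main() {
	store(map[string]string{"cpu": "0-3"})
}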

View File

@@ -109,8 +109,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ := m.GetCapacity()
resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
resourceCapacity := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
@@ -125,8 +125,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
@@ -142,8 +142,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
p2.Stop()
@@ -178,8 +178,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
t.FailNow()
}
capacity, allocatable, _ := m.GetCapacity()
resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
resourceCapacity := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
@@ -194,8 +194,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
@@ -211,8 +211,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal allocatable")
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of previously registered plugin should be removed")
p2.Stop()
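Dropping the ", _" from these map reads looks like staticcheck's S1005: assigning the second value of a map lookup to the blank identifier is equivalent to a plain one-value read, which already yields the zero value for missing keys. A small sketch with made-up resource names:

package main

import "fmt"

func main() {
	capacity := map[string]int64{"example.com/gpu": 2}

	// Before (flagged): gpus, _ := capacity["example.com/gpu"]
	// After: a one-value lookup behaves identically and returns the
	// zero value (0 here) when the key is absent.
	gpus := capacity["example.com/gpu"]
	missing := capacity["example.com/fpga"]
	fmt.Println(gpus, missing) // 2 0
}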

View File

@@ -164,8 +164,8 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
// Get the TopologyHints from a provider.
hints := provider.GetTopologyHints(pod, container)
// If hints is nil, insert a single, preferred any-socket hint into allProviderHints.
if hints == nil || len(hints) == 0 {
// If hints is empty, insert a single, preferred any-socket hint into allProviderHints.
if len(hints) == 0 {
klog.Infof("[topologymanager] Hint Provider has no preference for socket affinity with any resource")
affinity, _ := socketmask.NewSocketMask()
affinity.Fill()
@@ -294,7 +294,7 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
result := m.calculateAffinity(*pod, container)
admitPod := m.policy.CanAdmitPodResult(result.Preferred)
if admitPod.Admit == false {
if !admitPod.Admit {
return admitPod
}
c[container.Name] = result
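Two simplifications appear in this file. In Go, len of a nil slice or map is defined as 0, so the nil check in front of len(hints) == 0 is redundant (this is staticcheck S1009), and comparing a bool against a constant is just negation (S1002). A compact sketch under those assumed rule IDs:

package main

import "fmt"

func main() {
	var hints []string // nil slice

	// Before (flagged): if hints == nil || len(hints) == 0 { ... }
	// After: len on a nil slice is 0, so one test suffices.
	if len(hints) == 0 {
		fmt.Println("no hints: falling back to any-socket affinity")
	}

	admit := false
	// Before (flagged): if admit == false { ... }
	// After: plain logical negation.
	if !admit {
		fmt.Println("pod not admitted")
	}
}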

View File

@@ -382,7 +382,7 @@ func TestPodUpdateAnnotations(t *testing.T) {
channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
pod := CreateValidPod("foo2", "new")
pod.Annotations = make(map[string]string, 0)
pod.Annotations = make(map[string]string)
pod.Annotations["kubernetes.io/blah"] = "blah"
clone := pod.DeepCopy()
@@ -411,7 +411,7 @@ func TestPodUpdateLabels(t *testing.T) {
channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
pod := CreateValidPod("foo2", "new")
pod.Labels = make(map[string]string, 0)
pod.Labels = make(map[string]string)
pod.Labels["key"] = "value"
clone := pod.DeepCopy()
@@ -432,7 +432,7 @@ func TestPodRestore(t *testing.T) {
defer os.RemoveAll(tmpDir)
pod := CreateValidPod("api-server", "kube-default")
pod.Annotations = make(map[string]string, 0)
pod.Annotations = make(map[string]string)
pod.Annotations["kubernetes.io/config.source"] = kubetypes.ApiserverSource
pod.Annotations[core.BootstrapCheckpointAnnotationKey] = "true"
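The make(map[string]string, 0) rewrites look like staticcheck S1019: a size hint of 0 is the default and can simply be omitted, since it carries no preallocation benefit. Sketch:

package main

import "fmt"

func main() {
	// Before (flagged): annotations := make(map[string]string, 0)
	// After: drop the redundant capacity argument.
	annotations := make(map[string]string)
	annotations["kubernetes.io/blah"] = "blah"
	fmt.Println(annotations)
}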

View File

@@ -106,7 +106,7 @@ func (s *sourceURL) extractFromURL() error {
return fmt.Errorf("zero-length data received from %v", s.url)
}
// Short circuit if the data has not changed since the last time it was read.
if bytes.Compare(data, s.data) == 0 {
if bytes.Equal(data, s.data) {
return nil
}
s.data = data
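bytes.Compare is meant for three-way ordering, so testing it against 0 obscures a simple equality check; bytes.Equal states the intent directly and is equivalent here (staticcheck S1004). Sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	prev := []byte(`{"pods":[]}`)
	cur := []byte(`{"pods":[]}`)

	// Before (flagged): if bytes.Compare(cur, prev) == 0 { ... }
	// After: bytes.Equal reads as the equality test it is.
	if bytes.Equal(cur, prev) {
		fmt.Println("data unchanged, short-circuiting")
	}
}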

View File

@@ -29,12 +29,12 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
. "k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/volume"
)
type FakePod struct {
Pod *Pod
Pod *kubecontainer.Pod
NetnsPath string
}
@@ -44,14 +44,14 @@ type FakeRuntime struct {
CalledFunctions []string
PodList []*FakePod
AllPodList []*FakePod
ImageList []Image
ImageList []kubecontainer.Image
APIPodStatus v1.PodStatus
PodStatus PodStatus
PodStatus kubecontainer.PodStatus
StartedPods []string
KilledPods []string
StartedContainers []string
KilledContainers []string
RuntimeStatus *RuntimeStatus
RuntimeStatus *kubecontainer.RuntimeStatus
VersionInfo string
APIVersionInfo string
RuntimeType string
@@ -66,10 +66,10 @@ type FakeStreamingRuntime struct {
*FakeRuntime
}
var _ StreamingRuntime = &FakeStreamingRuntime{}
var _ kubecontainer.StreamingRuntime = &FakeStreamingRuntime{}
// FakeRuntime should implement Runtime.
var _ Runtime = &FakeRuntime{}
var _ kubecontainer.Runtime = &FakeRuntime{}
type FakeVersion struct {
Version string
@@ -90,18 +90,18 @@ func (fv *FakeVersion) Compare(other string) (int, error) {
}
type podsGetter interface {
GetPods(bool) ([]*Pod, error)
GetPods(bool) ([]*kubecontainer.Pod, error)
}
type FakeRuntimeCache struct {
getter podsGetter
}
func NewFakeRuntimeCache(getter podsGetter) RuntimeCache {
func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
return &FakeRuntimeCache{getter}
}
func (f *FakeRuntimeCache) GetPods() ([]*Pod, error) {
func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) {
return f.getter.GetPods(false)
}
@@ -177,7 +177,7 @@ func (f *FakeRuntime) Type() string {
return f.RuntimeType
}
func (f *FakeRuntime) Version() (Version, error) {
func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
f.Lock()
defer f.Unlock()
@@ -185,7 +185,7 @@ func (f *FakeRuntime) Version() (Version, error) {
return &FakeVersion{Version: f.VersionInfo}, f.Err
}
func (f *FakeRuntime) APIVersion() (Version, error) {
func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
f.Lock()
defer f.Unlock()
@@ -193,7 +193,7 @@ func (f *FakeRuntime) APIVersion() (Version, error) {
return &FakeVersion{Version: f.APIVersionInfo}, f.Err
}
func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
f.Lock()
defer f.Unlock()
@@ -201,11 +201,11 @@ func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
return f.RuntimeStatus, f.StatusErr
}
func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
f.Lock()
defer f.Unlock()
var pods []*Pod
var pods []*kubecontainer.Pod
f.CalledFunctions = append(f.CalledFunctions, "GetPods")
if all {
@@ -220,7 +220,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
return pods, f.Err
}
func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) {
func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
f.Lock()
defer f.Unlock()
@@ -236,7 +236,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff
return
}
func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
f.Lock()
defer f.Unlock()
@@ -274,7 +274,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er
return f.Err
}
func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
f.Lock()
defer f.Unlock()
@@ -283,7 +283,7 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS
return &status, f.Err
}
func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
f.Lock()
defer f.Unlock()
@@ -291,7 +291,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container
return f.Err
}
func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
f.Lock()
defer f.Unlock()
@@ -299,7 +299,7 @@ func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSan
return image.Image, f.Err
}
func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
f.Lock()
defer f.Unlock()
@@ -312,7 +312,7 @@ func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
return "", f.InspectErr
}
func (f *FakeRuntime) ListImages() ([]Image, error) {
func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
f.Lock()
defer f.Unlock()
@@ -320,7 +320,7 @@ func (f *FakeRuntime) ListImages() ([]Image, error) {
return f.ImageList, f.Err
}
func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
f.Lock()
defer f.Unlock()
@@ -337,7 +337,7 @@ func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
return f.Err
}
func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
f.Lock()
defer f.Unlock()
@@ -345,7 +345,7 @@ func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evi
return f.Err
}
func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error {
f.Lock()
defer f.Unlock()
@@ -353,7 +353,7 @@ func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
return f.Err
}
func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
f.Lock()
defer f.Unlock()
@@ -361,7 +361,7 @@ func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
return nil, f.Err
}
func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()
@@ -369,7 +369,7 @@ func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdo
return &url.URL{Host: FakeHost}, f.Err
}
func (f *FakeStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()
@@ -391,13 +391,13 @@ type FakeContainerCommandRunner struct {
Err error
// actual values when invoked
ContainerID ContainerID
ContainerID kubecontainer.ContainerID
Cmd []string
}
var _ ContainerCommandRunner = &FakeContainerCommandRunner{}
var _ kubecontainer.ContainerCommandRunner = &FakeContainerCommandRunner{}
func (f *FakeContainerCommandRunner) RunInContainer(containerID ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
// record invoked values
f.ContainerID = containerID
f.Cmd = cmd
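Replacing the dot import of pkg/kubelet/container with the named kubecontainer import is a style fix (likely stylecheck ST1001, which discourages dot imports): with a qualifier, readers can tell at a glance that Pod, Image, and Runtime come from that package rather than the current one. An illustrative sketch using a stdlib stand-in:

package main

// Before (flagged): . "strings" would let us call ToUpper("pod") with
// no hint of where the identifier comes from.
// After: a named import keeps the origin visible at every use, the same
// way kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" does above.

import (
	"fmt"
	str "strings"
)

func main() {
	fmt.Println(str.ToUpper("pod"))
}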

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
. "k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/volume"
)
@@ -35,7 +35,7 @@ type Mock struct {
mock.Mock
}
var _ Runtime = new(Mock)
var _ kubecontainer.Runtime = new(Mock)
func (r *Mock) Start() error {
args := r.Called()
@@ -47,32 +47,32 @@ func (r *Mock) Type() string {
return args.Get(0).(string)
}
func (r *Mock) Version() (Version, error) {
func (r *Mock) Version() (kubecontainer.Version, error) {
args := r.Called()
return args.Get(0).(Version), args.Error(1)
return args.Get(0).(kubecontainer.Version), args.Error(1)
}
func (r *Mock) APIVersion() (Version, error) {
func (r *Mock) APIVersion() (kubecontainer.Version, error) {
args := r.Called()
return args.Get(0).(Version), args.Error(1)
return args.Get(0).(kubecontainer.Version), args.Error(1)
}
func (r *Mock) Status() (*RuntimeStatus, error) {
func (r *Mock) Status() (*kubecontainer.RuntimeStatus, error) {
args := r.Called()
return args.Get(0).(*RuntimeStatus), args.Error(1)
return args.Get(0).(*kubecontainer.RuntimeStatus), args.Error(1)
}
func (r *Mock) GetPods(all bool) ([]*Pod, error) {
func (r *Mock) GetPods(all bool) ([]*kubecontainer.Pod, error) {
args := r.Called(all)
return args.Get(0).([]*Pod), args.Error(1)
return args.Get(0).([]*kubecontainer.Pod), args.Error(1)
}
func (r *Mock) SyncPod(pod *v1.Pod, status *PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult {
func (r *Mock) SyncPod(pod *v1.Pod, status *kubecontainer.PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) kubecontainer.PodSyncResult {
args := r.Called(pod, status, secrets, backOff)
return args.Get(0).(PodSyncResult)
return args.Get(0).(kubecontainer.PodSyncResult)
}
func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
func (r *Mock) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
args := r.Called(pod, runningPod, gracePeriodOverride)
return args.Error(0)
}
@@ -87,64 +87,64 @@ func (r *Mock) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
return args.Error(0)
}
func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
args := r.Called(uid, name, namespace)
return args.Get(0).(*PodStatus), args.Error(1)
return args.Get(0).(*kubecontainer.PodStatus), args.Error(1)
}
func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
func (r *Mock) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
args := r.Called(containerID, cmd, stdin, stdout, stderr, tty)
return args.Error(0)
}
func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
func (r *Mock) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
args := r.Called(containerID, stdin, stdout, stderr, tty)
return args.Error(0)
}
func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
args := r.Called(pod, containerID, logOptions, stdout, stderr)
return args.Error(0)
}
func (r *Mock) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (r *Mock) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
args := r.Called(image, pullSecrets)
return image.Image, args.Error(0)
}
func (r *Mock) GetImageRef(image ImageSpec) (string, error) {
func (r *Mock) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
args := r.Called(image)
return args.Get(0).(string), args.Error(1)
}
func (r *Mock) ListImages() ([]Image, error) {
func (r *Mock) ListImages() ([]kubecontainer.Image, error) {
args := r.Called()
return args.Get(0).([]Image), args.Error(1)
return args.Get(0).([]kubecontainer.Image), args.Error(1)
}
func (r *Mock) RemoveImage(image ImageSpec) error {
func (r *Mock) RemoveImage(image kubecontainer.ImageSpec) error {
args := r.Called(image)
return args.Error(0)
}
func (r *Mock) PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error {
func (r *Mock) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
args := r.Called(pod, port, stream)
return args.Error(0)
}
func (r *Mock) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
func (r *Mock) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
args := r.Called(gcPolicy, ready, evictNonDeletedPods)
return args.Error(0)
}
func (r *Mock) DeleteContainer(containerID ContainerID) error {
func (r *Mock) DeleteContainer(containerID kubecontainer.ContainerID) error {
args := r.Called(containerID)
return args.Error(0)
}
func (r *Mock) ImageStats() (*ImageStats, error) {
func (r *Mock) ImageStats() (*kubecontainer.ImageStats, error) {
args := r.Called()
return args.Get(0).(*ImageStats), args.Error(1)
return args.Get(0).(*kubecontainer.ImageStats), args.Error(1)
}
// UpdatePodCIDR fulfills the cri interface.

View File

@@ -37,7 +37,7 @@ const (
)
var (
ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)")
ethtoolOutputRegex = regexp.MustCompile(`peer_ifindex: (\d+)`)
)
func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {
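Switching the regexp literal to a backquoted raw string is the usual staticcheck S1007 fix: raw strings need no doubled backslashes, so the pattern reads exactly as the regexp engine sees it. Sketch:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Before (flagged): regexp.MustCompile("peer_ifindex: (\\d+)")
	// After: the raw string literal avoids escaping the backslash.
	re := regexp.MustCompile(`peer_ifindex: (\d+)`)
	fmt.Println(re.FindStringSubmatch("peer_ifindex: 42")) // [peer_ifindex: 42 42]
}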

View File

@@ -43,7 +43,7 @@ type fakeIPTables struct {
func NewFakeIPTables() *fakeIPTables {
return &fakeIPTables{
tables: make(map[string]*fakeTable, 0),
tables: make(map[string]*fakeTable),
builtinChains: map[string]sets.String{
string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"),
string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"),
@@ -203,7 +203,7 @@ func (f *fakeIPTables) EnsureRule(position utiliptables.RulePosition, tableName
ruleArgs := make([]string, 0)
for _, arg := range args {
// quote args with internal spaces (like comments)
if strings.Index(arg, " ") >= 0 {
if strings.Contains(arg, " ") {
arg = fmt.Sprintf("\"%s\"", arg)
}
ruleArgs = append(ruleArgs, arg)
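strings.Index(arg, " ") >= 0 is an existence test written as an index lookup; strings.Contains says the same thing directly (staticcheck S1003). Sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	args := []string{"-m", "comment", "--comment", "kube hostport"}
	for i, arg := range args {
		// Before (flagged): if strings.Index(arg, " ") >= 0 { ... }
		// After: strings.Contains reads as the boolean test it is.
		if strings.Contains(arg, " ") {
			args[i] = fmt.Sprintf("%q", arg) // quote args with internal spaces
		}
	}
	fmt.Println(strings.Join(args, " ")) // -m comment --comment "kube hostport"
}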

View File

@@ -351,7 +351,7 @@ func getExistingHostportIPTablesRules(iptables utiliptables.Interface) (map[util
}
}
for _, line := range strings.Split(string(iptablesData.Bytes()), "\n") {
for _, line := range strings.Split(iptablesData.String(), "\n") {
if strings.HasPrefix(line, fmt.Sprintf("-A %s", kubeHostportChainPrefix)) ||
strings.HasPrefix(line, fmt.Sprintf("-A %s", string(kubeHostportsChain))) {
existingHostportRules = append(existingHostportRules, line)
@@ -382,8 +382,6 @@ func filterRules(rules []string, filters []utiliptables.Chain) []string {
// filterChains deletes all entries of filter chains from chain map
func filterChains(chains map[utiliptables.Chain]string, filterChains []utiliptables.Chain) {
for _, chain := range filterChains {
if _, ok := chains[chain]; ok {
delete(chains, chain)
}
delete(chains, chain)
}
}
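Two more patterns in this file: string(buf.Bytes()) on a bytes.Buffer is just buf.String() (staticcheck S1030), and checking that a key exists before calling delete is unnecessary because delete on a missing key is a no-op (S1033). Sketch:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("-A KUBE-HOSTPORTS\n-A FORWARD")

	// Before (flagged): strings.Split(string(buf.Bytes()), "\n")
	lines := strings.Split(buf.String(), "\n")
	fmt.Println(len(lines)) // 2

	chains := map[string]string{"KUBE-HP-ABC123": ":KUBE-HP-ABC123 - [0:0]"}
	// Before (flagged): if _, ok := chains[k]; ok { delete(chains, k) }
	// After: delete is already safe for absent keys.
	delete(chains, "KUBE-HP-ABC123")
	delete(chains, "KUBE-HP-MISSING") // no-op, no panic
	fmt.Println(len(chains)) // 0
}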

View File

@@ -284,7 +284,7 @@ func TestHostportManager(t *testing.T) {
err := iptables.SaveInto(utiliptables.TableNAT, raw)
assert.NoError(t, err)
lines := strings.Split(string(raw.Bytes()), "\n")
lines := strings.Split(raw.String(), "\n")
expectedLines := map[string]bool{
`*nat`: true,
`:KUBE-HOSTPORTS - [0:0]`: true,
@@ -331,7 +331,7 @@ func TestHostportManager(t *testing.T) {
raw.Reset()
err = iptables.SaveInto(utiliptables.TableNAT, raw)
assert.NoError(t, err)
lines = strings.Split(string(raw.Bytes()), "\n")
lines = strings.Split(raw.String(), "\n")
remainingChains := make(map[string]bool)
for _, line := range lines {
if strings.HasPrefix(line, ":") {

View File

@@ -507,7 +507,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
errList = append(errList, err)
} else if portMappings != nil && len(portMappings) > 0 {
} else if len(portMappings) > 0 {
if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,

View File

@@ -135,8 +135,7 @@ func TestUpdateThreshold(t *testing.T) {
notifier := &MockCgroupNotifier{}
m := newTestMemoryThresholdNotifier(tc.evictionThreshold, notifierFactory, nil)
notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, tc.expectedThreshold.Value()).Return(notifier, tc.updateThresholdErr)
var events chan<- struct{}
events = m.events
var events chan<- struct{} = m.events
notifier.On("Start", events).Return()
err := m.UpdateThreshold(nodeSummary(tc.available, tc.workingSet, tc.usage, isAllocatableEvictionThreshold(tc.evictionThreshold)))
if err != nil && !tc.expectErr {
@@ -169,8 +168,7 @@ func TestStart(t *testing.T) {
wg.Done()
})
notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, int64(0)).Return(notifier, nil)
var events chan<- struct{}
events = m.events
var events chan<- struct{} = m.events
notifier.On("Start", events).Return()
notifier.On("Stop").Return()
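Collapsing the declare-then-assign pair into a single declaration is staticcheck S1021 (merge variable declaration and assignment). The two-step form existed only to convert a bidirectional channel into the send-only type the mock expects, and a single var declaration performs that conversion just as well. Sketch:

package main

import "fmt"

func main() {
	events := make(chan struct{}, 1)

	// Before (flagged):
	//	var sendOnly chan<- struct{}
	//	sendOnly = events
	// After: one declaration; the implicit conversion to the
	// send-only channel type still happens.
	var sendOnly chan<- struct{} = events

	sendOnly <- struct{}{}
	fmt.Println(len(events)) // 1
}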

View File

@@ -980,7 +980,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
require.NoError(t, err)
memCapacity, _ := updatedNode.Status.Capacity[v1.ResourceMemory]
memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
@@ -2001,8 +2001,6 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
"test unschedulable taint for TaintNodesByCondition")
return
})
}

View File

@@ -253,7 +253,7 @@ func (g *GenericPLEG) relist() {
needsReinspection[pid] = pod
continue
} else if _, found := g.podsToReinspect[pid]; found {
} else {
// this pod was in the list to reinspect and we did so because it had events, so remove it
// from the list (we don't want the reinspection code below to inspect it a second time in
// this relist execution)

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher"
@@ -76,7 +75,6 @@ func (f *fakePluginHandler) DeRegisterPlugin(pluginName string) {
f.Lock()
defer f.Unlock()
f.deregisterPluginCalled = true
return
}
func init() {
@@ -143,7 +141,7 @@ func TestPluginRegistration(t *testing.T) {
// Add handler for device plugin
fakeHandler := newFakePluginHandler()
pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, fakeHandler)
pluginManager.AddHandler(registerapi.DevicePlugin, fakeHandler)
// Add a new plugin
socketPath := fmt.Sprintf("%s/plugin.sock", socketDir)
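Dropping the pluginwatcherapi alias removes a duplicate import: both aliases pointed at k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1, which stylecheck flags (likely ST1019, importing the same package multiple times). Sketch with a stdlib stand-in so it compiles:

package main

// Before (flagged): two aliases for one package, e.g.
//
//	pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
//	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
//
// After: keep one alias and use it at every call site.

import (
	"fmt"
	registerapi "strings" // stand-in for the single remaining alias
)

func main() {
	fmt.Println(registerapi.ToUpper("deviceplugin"))
}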

View File

@@ -26,7 +26,6 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
@@ -183,7 +182,7 @@ func Test_Run_Positive_Register(t *testing.T) {
dsw,
asw,
)
reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
// Start the reconciler to fill ASW.
stopChan := make(chan struct{})
@@ -228,7 +227,7 @@ func Test_Run_Positive_RegisterThenUnregister(t *testing.T) {
dsw,
asw,
)
reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
// Start the reconciler to fill ASW.
stopChan := make(chan struct{})
@@ -283,7 +282,7 @@ func Test_Run_Positive_ReRegister(t *testing.T) {
dsw,
asw,
)
reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
// Start the reconciler to fill ASW.
stopChan := make(chan struct{})

View File

@@ -92,7 +92,6 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID)
}
klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid)
var GracePeriodSeconds int64
GracePeriodSeconds = 0
if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil {
// Unfortunately, there's no generic error for failing a precondition
if !(errors.IsNotFound(err) || errors.IsConflict(err)) {

View File

@@ -241,9 +241,7 @@ func (p *podWorkers) removeWorker(uid types.UID) {
// If there is an undelivered work update for this pod we need to remove it
// since per-pod goroutine won't be able to put it to the already closed
// channel when it finishes processing the current work update.
if _, cached := p.lastUndeliveredWorkUpdate[uid]; cached {
delete(p.lastUndeliveredWorkUpdate, uid)
}
delete(p.lastUndeliveredWorkUpdate, uid)
}
}
func (p *podWorkers) ForgetWorker(uid types.UID) {

View File

@@ -164,10 +164,7 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
func IsCriticalPodBasedOnPriority(priority int32) bool {
if priority >= scheduling.SystemCriticalPriority {
return true
}
return false
return priority >= scheduling.SystemCriticalPriority
}
// IsStaticPod returns true if the pod is a static pod.
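Rewriting if cond { return true }; return false as return cond is staticcheck S1008. A sketch mirroring the priority check, with a stand-in for scheduling.SystemCriticalPriority:

package main

import "fmt"

// systemCriticalPriority stands in for scheduling.SystemCriticalPriority.
const systemCriticalPriority int32 = 2000000000

// Before (flagged):
//
//	if priority >= systemCriticalPriority {
//		return true
//	}
//	return false
//
// After: return the comparison itself.
func isCriticalPodBasedOnPriority(priority int32) bool {
	return priority >= systemCriticalPriority
}

func main() {
	fmt.Println(isCriticalPodBasedOnPriority(2000000001)) // true
	fmt.Println(isCriticalPodBasedOnPriority(0))          // false
}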

View File

@@ -426,7 +426,6 @@ func delayClaimBecomesBound(
Phase: v1.ClaimBound,
}
kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
return
}
func runVolumeManager(manager VolumeManager) chan struct{} {