Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)
Cleanup more static check issues (S1*,ST*)
This commit is contained in: parent 8a495cb5e4, commit a2c51674cf
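
The diff below is a series of mechanical cleanups flagged by staticcheck's simplification (S1*) and style (ST*) checks. As a rough, hypothetical sketch (the function and variable names here are invented for illustration, not taken from the diff), the recurring before/after patterns look like this in Go:

package example

import (
	"bytes"
	"strings"
)

// simplify illustrates the kinds of rewrites applied throughout this commit.
func simplify(data, prev []byte, arg string, hints []string, admit bool) bool {
	// bytes.Compare(data, prev) == 0  ->  bytes.Equal(data, prev)
	if bytes.Equal(data, prev) {
		return false
	}

	// strings.Index(arg, " ") >= 0  ->  strings.Contains(arg, " ")
	if strings.Contains(arg, " ") {
		arg = "\"" + arg + "\""
	}

	// hints == nil || len(hints) == 0  ->  len(hints) == 0
	// (len of a nil slice is already 0, so the nil check is redundant)
	if len(hints) == 0 {
		return false
	}

	// admit == false  ->  !admit, and
	// "if cond { return true }; return false"  ->  "return cond"
	return !admit
}

The rest of the diff applies the same idea to imports (replacing a dot import with a named kubecontainer import, dropping a duplicate pluginwatcherapi alias), literals (a raw-string regexp instead of escaped backslashes, make without a redundant size argument, buf.String() instead of string(buf.Bytes())), and control flow (dropping bare trailing return statements and redundant map-existence checks before delete).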
@@ -20,10 +20,11 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"os"
 	"sync"
+
+	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
 type stateFileData struct {
@@ -144,7 +145,6 @@ func (sf *stateFile) storeState() {
 	if err = ioutil.WriteFile(sf.stateFilePath, content, 0644); err != nil {
 		panic("[cpumanager] state file not written")
 	}
-	return
 }
 
 func (sf *stateFile) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
@@ -109,8 +109,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ := m.GetCapacity()
-	resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity := capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
@@ -125,8 +125,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
 
@@ -142,8 +142,8 @@ func TestDevicePluginReRegistration(t *testing.T) {
 		t.Fatalf("timeout while waiting for manager update")
 	}
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
 	p2.Stop()
@@ -178,8 +178,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 		t.FailNow()
 	}
 	capacity, allocatable, _ := m.GetCapacity()
-	resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity := capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
@@ -194,8 +194,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	}
 
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
 
@@ -211,8 +211,8 @@ func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
 	}
 
 	capacity, allocatable, _ = m.GetCapacity()
-	resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
-	resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
+	resourceCapacity = capacity[v1.ResourceName(testResourceName)]
+	resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
 	require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
 	require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of previous registered should be removed")
 	p2.Stop()
@@ -164,8 +164,8 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
 		// Get the TopologyHints from a provider.
 		hints := provider.GetTopologyHints(pod, container)
 
-		// If hints is nil, insert a single, preferred any-socket hint into allProviderHints.
-		if hints == nil || len(hints) == 0 {
+		// If hints is empty, insert a single, preferred any-socket hint into allProviderHints.
+		if len(hints) == 0 {
 			klog.Infof("[topologymanager] Hint Provider has no preference for socket affinity with any resource")
 			affinity, _ := socketmask.NewSocketMask()
 			affinity.Fill()
@@ -294,7 +294,7 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
 	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 		result := m.calculateAffinity(*pod, container)
 		admitPod := m.policy.CanAdmitPodResult(result.Preferred)
-		if admitPod.Admit == false {
+		if !admitPod.Admit {
 			return admitPod
 		}
 		c[container.Name] = result
@@ -382,7 +382,7 @@ func TestPodUpdateAnnotations(t *testing.T) {
 	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
 
 	pod := CreateValidPod("foo2", "new")
-	pod.Annotations = make(map[string]string, 0)
+	pod.Annotations = make(map[string]string)
 	pod.Annotations["kubernetes.io/blah"] = "blah"
 
 	clone := pod.DeepCopy()
@@ -411,7 +411,7 @@ func TestPodUpdateLabels(t *testing.T) {
 	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
 
 	pod := CreateValidPod("foo2", "new")
-	pod.Labels = make(map[string]string, 0)
+	pod.Labels = make(map[string]string)
 	pod.Labels["key"] = "value"
 
 	clone := pod.DeepCopy()
@@ -432,7 +432,7 @@ func TestPodRestore(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	pod := CreateValidPod("api-server", "kube-default")
-	pod.Annotations = make(map[string]string, 0)
+	pod.Annotations = make(map[string]string)
 	pod.Annotations["kubernetes.io/config.source"] = kubetypes.ApiserverSource
 	pod.Annotations[core.BootstrapCheckpointAnnotationKey] = "true"
 
@@ -106,7 +106,7 @@ func (s *sourceURL) extractFromURL() error {
 		return fmt.Errorf("zero-length data received from %v", s.url)
 	}
 	// Short circuit if the data has not changed since the last time it was read.
-	if bytes.Compare(data, s.data) == 0 {
+	if bytes.Equal(data, s.data) {
 		return nil
 	}
 	s.data = data
@@ -29,12 +29,12 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/flowcontrol"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-	. "k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/volume"
 )
 
 type FakePod struct {
-	Pod *Pod
+	Pod *kubecontainer.Pod
 	NetnsPath string
 }
 
@@ -44,14 +44,14 @@ type FakeRuntime struct {
 	CalledFunctions []string
 	PodList []*FakePod
 	AllPodList []*FakePod
-	ImageList []Image
+	ImageList []kubecontainer.Image
 	APIPodStatus v1.PodStatus
-	PodStatus PodStatus
+	PodStatus kubecontainer.PodStatus
 	StartedPods []string
 	KilledPods []string
 	StartedContainers []string
 	KilledContainers []string
-	RuntimeStatus *RuntimeStatus
+	RuntimeStatus *kubecontainer.RuntimeStatus
 	VersionInfo string
 	APIVersionInfo string
 	RuntimeType string
@@ -66,10 +66,10 @@ type FakeStreamingRuntime struct {
 	*FakeRuntime
 }
 
-var _ StreamingRuntime = &FakeStreamingRuntime{}
+var _ kubecontainer.StreamingRuntime = &FakeStreamingRuntime{}
 
 // FakeRuntime should implement Runtime.
-var _ Runtime = &FakeRuntime{}
+var _ kubecontainer.Runtime = &FakeRuntime{}
 
 type FakeVersion struct {
 	Version string
@@ -90,18 +90,18 @@ func (fv *FakeVersion) Compare(other string) (int, error) {
 }
 
 type podsGetter interface {
-	GetPods(bool) ([]*Pod, error)
+	GetPods(bool) ([]*kubecontainer.Pod, error)
 }
 
 type FakeRuntimeCache struct {
 	getter podsGetter
 }
 
-func NewFakeRuntimeCache(getter podsGetter) RuntimeCache {
+func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
 	return &FakeRuntimeCache{getter}
 }
 
-func (f *FakeRuntimeCache) GetPods() ([]*Pod, error) {
+func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) {
 	return f.getter.GetPods(false)
 }
 
@@ -177,7 +177,7 @@ func (f *FakeRuntime) Type() string {
 	return f.RuntimeType
 }
 
-func (f *FakeRuntime) Version() (Version, error) {
+func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -185,7 +185,7 @@ func (f *FakeRuntime) Version() (Version, error) {
 	return &FakeVersion{Version: f.VersionInfo}, f.Err
 }
 
-func (f *FakeRuntime) APIVersion() (Version, error) {
+func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -193,7 +193,7 @@ func (f *FakeRuntime) APIVersion() (Version, error) {
 	return &FakeVersion{Version: f.APIVersionInfo}, f.Err
 }
 
-func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
+func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -201,11 +201,11 @@ func (f *FakeRuntime) Status() (*RuntimeStatus, error) {
 	return f.RuntimeStatus, f.StatusErr
 }
 
-func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
+func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
 	f.Lock()
 	defer f.Unlock()
 
-	var pods []*Pod
+	var pods []*kubecontainer.Pod
 
 	f.CalledFunctions = append(f.CalledFunctions, "GetPods")
 	if all {
@@ -220,7 +220,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
 	return pods, f.Err
 }
 
-func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) {
+func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -236,7 +236,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *PodStatus, _ []v1.Secret, backOff
 	return
 }
 
-func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
+func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -274,7 +274,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er
 	return f.Err
 }
 
-func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
+func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -283,7 +283,7 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS
 	return &status, f.Err
 }
 
-func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -291,7 +291,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container
 	return f.Err
 }
 
-func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -299,7 +299,7 @@ func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSan
 	return image.Image, f.Err
 }
 
-func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
+func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -312,7 +312,7 @@ func (f *FakeRuntime) GetImageRef(image ImageSpec) (string, error) {
 	return "", f.InspectErr
 }
 
-func (f *FakeRuntime) ListImages() ([]Image, error) {
+func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -320,7 +320,7 @@ func (f *FakeRuntime) ListImages() ([]Image, error) {
 	return f.ImageList, f.Err
 }
 
-func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
+func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -337,7 +337,7 @@ func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
 	return f.Err
 }
 
-func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -345,7 +345,7 @@ func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evi
 	return f.Err
 }
 
-func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
+func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -353,7 +353,7 @@ func (f *FakeRuntime) DeleteContainer(containerID ContainerID) error {
 	return f.Err
 }
 
-func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
+func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -361,7 +361,7 @@ func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
 	return nil, f.Err
 }
 
-func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -369,7 +369,7 @@ func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdo
 	return &url.URL{Host: FakeHost}, f.Err
 }
 
-func (f *FakeStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
 	f.Lock()
 	defer f.Unlock()
 
@@ -391,13 +391,13 @@ type FakeContainerCommandRunner struct {
 	Err error
 
 	// actual values when invoked
-	ContainerID ContainerID
+	ContainerID kubecontainer.ContainerID
 	Cmd []string
 }
 
-var _ ContainerCommandRunner = &FakeContainerCommandRunner{}
+var _ kubecontainer.ContainerCommandRunner = &FakeContainerCommandRunner{}
 
-func (f *FakeContainerCommandRunner) RunInContainer(containerID ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
+func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
 	// record invoked values
 	f.ContainerID = containerID
 	f.Cmd = cmd
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 	"k8s.io/client-go/util/flowcontrol"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-	. "k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/volume"
 )
 
@@ -35,7 +35,7 @@ type Mock struct {
 	mock.Mock
 }
 
-var _ Runtime = new(Mock)
+var _ kubecontainer.Runtime = new(Mock)
 
 func (r *Mock) Start() error {
 	args := r.Called()
@@ -47,32 +47,32 @@ func (r *Mock) Type() string {
 	return args.Get(0).(string)
 }
 
-func (r *Mock) Version() (Version, error) {
+func (r *Mock) Version() (kubecontainer.Version, error) {
 	args := r.Called()
-	return args.Get(0).(Version), args.Error(1)
+	return args.Get(0).(kubecontainer.Version), args.Error(1)
 }
 
-func (r *Mock) APIVersion() (Version, error) {
+func (r *Mock) APIVersion() (kubecontainer.Version, error) {
 	args := r.Called()
-	return args.Get(0).(Version), args.Error(1)
+	return args.Get(0).(kubecontainer.Version), args.Error(1)
 }
 
-func (r *Mock) Status() (*RuntimeStatus, error) {
+func (r *Mock) Status() (*kubecontainer.RuntimeStatus, error) {
 	args := r.Called()
-	return args.Get(0).(*RuntimeStatus), args.Error(0)
+	return args.Get(0).(*kubecontainer.RuntimeStatus), args.Error(0)
 }
 
-func (r *Mock) GetPods(all bool) ([]*Pod, error) {
+func (r *Mock) GetPods(all bool) ([]*kubecontainer.Pod, error) {
 	args := r.Called(all)
-	return args.Get(0).([]*Pod), args.Error(1)
+	return args.Get(0).([]*kubecontainer.Pod), args.Error(1)
 }
 
-func (r *Mock) SyncPod(pod *v1.Pod, status *PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult {
+func (r *Mock) SyncPod(pod *v1.Pod, status *kubecontainer.PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) kubecontainer.PodSyncResult {
 	args := r.Called(pod, status, secrets, backOff)
-	return args.Get(0).(PodSyncResult)
+	return args.Get(0).(kubecontainer.PodSyncResult)
 }
 
-func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
+func (r *Mock) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
 	args := r.Called(pod, runningPod, gracePeriodOverride)
 	return args.Error(0)
 }
@@ -87,64 +87,64 @@ func (r *Mock) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
 	return args.Error(0)
 }
 
-func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
+func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
 	args := r.Called(uid, name, namespace)
-	return args.Get(0).(*PodStatus), args.Error(1)
+	return args.Get(0).(*kubecontainer.PodStatus), args.Error(1)
 }
 
-func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
+func (r *Mock) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
 	args := r.Called(containerID, cmd, stdin, stdout, stderr, tty)
 	return args.Error(0)
 }
 
-func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
+func (r *Mock) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
	args := r.Called(containerID, stdin, stdout, stderr, tty)
 	return args.Error(0)
 }
 
-func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+func (r *Mock) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
 	args := r.Called(pod, containerID, logOptions, stdout, stderr)
 	return args.Error(0)
 }
 
-func (r *Mock) PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (r *Mock) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	args := r.Called(image, pullSecrets)
 	return image.Image, args.Error(0)
 }
 
-func (r *Mock) GetImageRef(image ImageSpec) (string, error) {
+func (r *Mock) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	args := r.Called(image)
 	return args.Get(0).(string), args.Error(1)
 }
 
-func (r *Mock) ListImages() ([]Image, error) {
+func (r *Mock) ListImages() ([]kubecontainer.Image, error) {
 	args := r.Called()
-	return args.Get(0).([]Image), args.Error(1)
+	return args.Get(0).([]kubecontainer.Image), args.Error(1)
 }
 
-func (r *Mock) RemoveImage(image ImageSpec) error {
+func (r *Mock) RemoveImage(image kubecontainer.ImageSpec) error {
 	args := r.Called(image)
 	return args.Error(0)
 }
 
-func (r *Mock) PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error {
+func (r *Mock) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
 	args := r.Called(pod, port, stream)
 	return args.Error(0)
 }
 
-func (r *Mock) GarbageCollect(gcPolicy ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (r *Mock) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
 	args := r.Called(gcPolicy, ready, evictNonDeletedPods)
 	return args.Error(0)
 }
 
-func (r *Mock) DeleteContainer(containerID ContainerID) error {
+func (r *Mock) DeleteContainer(containerID kubecontainer.ContainerID) error {
 	args := r.Called(containerID)
 	return args.Error(0)
 }
 
-func (r *Mock) ImageStats() (*ImageStats, error) {
+func (r *Mock) ImageStats() (*kubecontainer.ImageStats, error) {
 	args := r.Called()
-	return args.Get(0).(*ImageStats), args.Error(1)
+	return args.Get(0).(*kubecontainer.ImageStats), args.Error(1)
 }
 
 // UpdatePodCIDR fulfills the cri interface.
@@ -37,7 +37,7 @@ const (
 )
 
 var (
-	ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)")
+	ethtoolOutputRegex = regexp.MustCompile(`peer_ifindex: (\d+)`)
 )
 
 func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {
@@ -43,7 +43,7 @@ type fakeIPTables struct {
 
 func NewFakeIPTables() *fakeIPTables {
 	return &fakeIPTables{
-		tables: make(map[string]*fakeTable, 0),
+		tables: make(map[string]*fakeTable),
 		builtinChains: map[string]sets.String{
 			string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"),
 			string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"),
@@ -203,7 +203,7 @@ func (f *fakeIPTables) EnsureRule(position utiliptables.RulePosition, tableName
 	ruleArgs := make([]string, 0)
 	for _, arg := range args {
 		// quote args with internal spaces (like comments)
-		if strings.Index(arg, " ") >= 0 {
+		if strings.Contains(arg, " ") {
 			arg = fmt.Sprintf("\"%s\"", arg)
 		}
 		ruleArgs = append(ruleArgs, arg)
@@ -351,7 +351,7 @@ func getExistingHostportIPTablesRules(iptables utiliptables.Interface) (map[util
 		}
 	}
 
-	for _, line := range strings.Split(string(iptablesData.Bytes()), "\n") {
+	for _, line := range strings.Split(iptablesData.String(), "\n") {
 		if strings.HasPrefix(line, fmt.Sprintf("-A %s", kubeHostportChainPrefix)) ||
 			strings.HasPrefix(line, fmt.Sprintf("-A %s", string(kubeHostportsChain))) {
 			existingHostportRules = append(existingHostportRules, line)
@@ -382,8 +382,6 @@ func filterRules(rules []string, filters []utiliptables.Chain) []string {
 // filterChains deletes all entries of filter chains from chain map
 func filterChains(chains map[utiliptables.Chain]string, filterChains []utiliptables.Chain) {
 	for _, chain := range filterChains {
-		if _, ok := chains[chain]; ok {
-			delete(chains, chain)
-		}
+		delete(chains, chain)
 	}
 }
@@ -284,7 +284,7 @@ func TestHostportManager(t *testing.T) {
 	err := iptables.SaveInto(utiliptables.TableNAT, raw)
 	assert.NoError(t, err)
 
-	lines := strings.Split(string(raw.Bytes()), "\n")
+	lines := strings.Split(raw.String(), "\n")
 	expectedLines := map[string]bool{
 		`*nat`: true,
 		`:KUBE-HOSTPORTS - [0:0]`: true,
@@ -331,7 +331,7 @@ func TestHostportManager(t *testing.T) {
 	raw.Reset()
 	err = iptables.SaveInto(utiliptables.TableNAT, raw)
 	assert.NoError(t, err)
-	lines = strings.Split(string(raw.Bytes()), "\n")
+	lines = strings.Split(raw.String(), "\n")
 	remainingChains := make(map[string]bool)
 	for _, line := range lines {
 		if strings.HasPrefix(line, ":") {
@@ -507,7 +507,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
 	portMappings, err := plugin.host.GetPodPortMappings(id.ID)
 	if err != nil {
 		errList = append(errList, err)
-	} else if portMappings != nil && len(portMappings) > 0 {
+	} else if len(portMappings) > 0 {
 		if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
 			Namespace: namespace,
 			Name: name,
@@ -135,8 +135,7 @@ func TestUpdateThreshold(t *testing.T) {
 	notifier := &MockCgroupNotifier{}
 	m := newTestMemoryThresholdNotifier(tc.evictionThreshold, notifierFactory, nil)
 	notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, tc.expectedThreshold.Value()).Return(notifier, tc.updateThresholdErr)
-	var events chan<- struct{}
-	events = m.events
+	var events chan<- struct{} = m.events
 	notifier.On("Start", events).Return()
 	err := m.UpdateThreshold(nodeSummary(tc.available, tc.workingSet, tc.usage, isAllocatableEvictionThreshold(tc.evictionThreshold)))
 	if err != nil && !tc.expectErr {
@@ -169,8 +168,7 @@ func TestStart(t *testing.T) {
 		wg.Done()
 	})
 	notifierFactory.On("NewCgroupNotifier", testCgroupPath, memoryUsageAttribute, int64(0)).Return(notifier, nil)
-	var events chan<- struct{}
-	events = m.events
+	var events chan<- struct{} = m.events
 	notifier.On("Start", events).Return()
 	notifier.On("Stop").Return()
 
@@ -980,7 +980,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 
 	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
 	require.NoError(t, err)
-	memCapacity, _ := updatedNode.Status.Capacity[v1.ResourceMemory]
+	memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
 	updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
 	assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
 
@@ -2001,8 +2001,6 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
 			utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
 			taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
 			"test unschedulable taint for TaintNodesByCondition")
-
-		return
 	})
 }
 
@@ -253,7 +253,7 @@ func (g *GenericPLEG) relist() {
 			needsReinspection[pid] = pod
 
 			continue
-		} else if _, found := g.podsToReinspect[pid]; found {
+		} else {
 			// this pod was in the list to reinspect and we did so because it had events, so remove it
 			// from the list (we don't want the reinspection code below to inspect it a second time in
 			// this relist execution)
@@ -28,7 +28,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
-	pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher"
@@ -76,7 +75,6 @@ func (f *fakePluginHandler) DeRegisterPlugin(pluginName string) {
 	f.Lock()
 	defer f.Unlock()
 	f.deregisterPluginCalled = true
-	return
 }
 
 func init() {
@@ -143,7 +141,7 @@ func TestPluginRegistration(t *testing.T) {
 
 	// Add handler for device plugin
 	fakeHandler := newFakePluginHandler()
-	pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, fakeHandler)
+	pluginManager.AddHandler(registerapi.DevicePlugin, fakeHandler)
 
 	// Add a new plugin
 	socketPath := fmt.Sprintf("%s/plugin.sock", socketDir)
@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
-	pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
@@ -183,7 +182,7 @@ func Test_Run_Positive_Register(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
@@ -228,7 +227,7 @@ func Test_Run_Positive_RegisterThenUnregister(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
@@ -283,7 +282,7 @@ func Test_Run_Positive_ReRegister(t *testing.T) {
 		dsw,
 		asw,
 	)
-	reconciler.AddHandler(pluginwatcherapi.DevicePlugin, cache.PluginHandler(di))
+	reconciler.AddHandler(registerapi.DevicePlugin, cache.PluginHandler(di))
 
 	// Start the reconciler to fill ASW.
 	stopChan := make(chan struct{})
@@ -92,7 +92,6 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID)
 	}
 	klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid)
 	var GracePeriodSeconds int64
-	GracePeriodSeconds = 0
 	if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil {
 		// Unfortunately, there's no generic error for failing a precondition
 		if !(errors.IsNotFound(err) || errors.IsConflict(err)) {
@@ -241,9 +241,7 @@ func (p *podWorkers) removeWorker(uid types.UID) {
 		// If there is an undelivered work update for this pod we need to remove it
 		// since per-pod goroutine won't be able to put it to the already closed
 		// channel when it finishes processing the current work update.
-		if _, cached := p.lastUndeliveredWorkUpdate[uid]; cached {
-			delete(p.lastUndeliveredWorkUpdate, uid)
-		}
+		delete(p.lastUndeliveredWorkUpdate, uid)
 	}
 }
 func (p *podWorkers) ForgetWorker(uid types.UID) {
@@ -164,10 +164,7 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
 
 // IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
 func IsCriticalPodBasedOnPriority(priority int32) bool {
-	if priority >= scheduling.SystemCriticalPriority {
-		return true
-	}
-	return false
+	return priority >= scheduling.SystemCriticalPriority
 }
 
 // IsStaticPod returns true if the pod is a static pod.
@@ -426,7 +426,6 @@ func delayClaimBecomesBound(
 		Phase: v1.ClaimBound,
 	}
 	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
-	return
 }
 
 func runVolumeManager(manager VolumeManager) chan struct{} {
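
Much of the churn in the fake runtime and mock test helpers above comes from removing a dot import of the kubelet container package in favor of a named import, so every identifier from that package carries an explicit qualifier. A minimal sketch of the pattern, with a made-up type standing in for the real fakes:

package example

// Before the change, the package was dot-imported, so Pod, Version,
// ImageSpec, etc. resolved without a qualifier:
//
//	import . "k8s.io/kubernetes/pkg/kubelet/container"
//	func (f *FakeRuntimeSketch) GetPods(all bool) ([]*Pod, error) { ... }
//
// After the change, a named import makes the origin of each type explicit.
import (
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// FakeRuntimeSketch is a trimmed-down, hypothetical stand-in for the fake
// runtime touched in the diff.
type FakeRuntimeSketch struct {
	PodList []*kubecontainer.Pod
}

// GetPods mirrors the qualified form the commit switches to.
func (f *FakeRuntimeSketch) GetPods(all bool) ([]*kubecontainer.Pod, error) {
	return f.PodList, nil
}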