plumb context from CRI calls through kubelet

David Ashpole 2022-10-27 20:03:05 +00:00
parent 6e31c6531f
commit f43b4f1b95
No known key found for this signature in database
GPG Key ID: 563A85007BFA1BA2
115 changed files with 1440 additions and 1183 deletions
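The change is mechanical but wide: every CRI-facing method gains a context.Context as its first parameter, intermediate kubelet layers forward the caller's context instead of minting their own, and entry points that have no caller context yet fall back to context.Background(). A minimal sketch of the shape in plain Go; the names are illustrative and not taken from this commit:

package main

import (
    "context"
    "fmt"
    "time"
)

// runtime stands in for a CRI-backed interface: after the change, ctx comes
// first so cancellation and deadlines can reach the RPC layer.
type runtime interface {
    Status(ctx context.Context) (string, error)
}

type remoteRuntime struct{}

func (r *remoteRuntime) Status(ctx context.Context) (string, error) {
    // A real implementation would hand ctx to the gRPC call; here we just
    // honor cancellation.
    if err := ctx.Err(); err != nil {
        return "", err
    }
    return "ready", nil
}

// syncLoop stands in for an intermediate kubelet layer: it forwards the
// context it was given rather than creating a new one.
func syncLoop(ctx context.Context, rt runtime) error {
    status, err := rt.Status(ctx)
    if err != nil {
        return err
    }
    fmt.Println("runtime status:", status)
    return nil
}

func main() {
    // Entry points without a caller-supplied context start from Background;
    // callers that want a bound can now impose one.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    if err := syncLoop(ctx, &remoteRuntime{}); err != nil {
        fmt.Println("sync failed:", err)
    }
}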

View File

@@ -18,6 +18,7 @@ package tests
 import (
     "bytes"
+    "context"
     "fmt"
     "io"
     "net"
@@ -51,7 +52,7 @@ type fakePortForwarder struct {
 var _ portforward.PortForwarder = &fakePortForwarder{}

-func (pf *fakePortForwarder) PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
+func (pf *fakePortForwarder) PortForward(_ context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
     defer stream.Close()
     // read from the client

View File

@@ -18,6 +18,7 @@ package tests
 import (
     "bytes"
+    "context"
     "errors"
     "fmt"
     "io"
@@ -58,11 +59,11 @@ type fakeExecutor struct {
     exec bool
 }

-func (ex *fakeExecutor) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error {
+func (ex *fakeExecutor) ExecInContainer(_ context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error {
     return ex.run(name, uid, container, cmd, in, out, err, tty)
 }

-func (ex *fakeExecutor) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error {
+func (ex *fakeExecutor) AttachContainer(_ context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error {
     return ex.run(name, uid, container, nil, in, out, err, tty)
 }

View File

@@ -21,6 +21,7 @@ package cm
 import (
     "bytes"
+    "context"
     "fmt"
     "os"
     "path"
@@ -557,10 +558,11 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
     podStatusProvider status.PodStatusProvider,
     runtimeService internalapi.RuntimeService,
     localStorageCapacityIsolation bool) error {
+    ctx := context.Background()
     // Initialize CPU manager
     if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) {
-        containerMap := buildContainerMapFromRuntime(runtimeService)
+        containerMap := buildContainerMapFromRuntime(ctx, runtimeService)
         err := cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
         if err != nil {
             return fmt.Errorf("start cpu manager error: %v", err)
@@ -569,7 +571,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
     // Initialize memory manager
     if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryManager) {
-        containerMap := buildContainerMapFromRuntime(runtimeService)
+        containerMap := buildContainerMapFromRuntime(ctx, runtimeService)
         err := cm.memoryManager.Start(memorymanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
         if err != nil {
             return fmt.Errorf("start memory manager error: %v", err)
@@ -727,15 +729,15 @@ func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
     }
 }

-func buildContainerMapFromRuntime(runtimeService internalapi.RuntimeService) containermap.ContainerMap {
+func buildContainerMapFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) containermap.ContainerMap {
     podSandboxMap := make(map[string]string)
-    podSandboxList, _ := runtimeService.ListPodSandbox(nil)
+    podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil)
     for _, p := range podSandboxList {
         podSandboxMap[p.Id] = p.Metadata.Uid
     }

     containerMap := containermap.NewContainerMap()
-    containerList, _ := runtimeService.ListContainers(nil)
+    containerList, _ := runtimeService.ListContainers(ctx, nil)
     for _, c := range containerList {
         if _, exists := podSandboxMap[c.PodSandboxId]; !exists {
             klog.InfoS("no PodSandBox found for the container", "podSandboxId", c.PodSandboxId, "containerName", c.Metadata.Name, "containerId", c.Id)
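Two details in this hunk are worth noting: Start receives no caller context, so a context.Background() is minted at the boundary and threaded into the list calls, and the helper still discards the errors from ListPodSandbox and ListContainers. A hedged sketch of the same map-building shape that propagates those errors instead, using simplified stand-in types rather than the real internalapi:

package cmsketch

import (
    "context"
    "fmt"
)

// Simplified stand-ins for the CRI list results used by the helper.
type sandbox struct{ ID, PodUID string }
type ctr struct{ ID, PodSandboxID string }

type lister interface {
    ListPodSandbox(ctx context.Context) ([]sandbox, error)
    ListContainers(ctx context.Context) ([]ctr, error)
}

// buildContainerMap mirrors buildContainerMapFromRuntime: index sandboxes by
// ID, then attribute each container to its pod through its sandbox, but it
// surfaces list errors rather than dropping them.
func buildContainerMap(ctx context.Context, rs lister) (map[string]string, error) {
    sandboxes, err := rs.ListPodSandbox(ctx)
    if err != nil {
        return nil, fmt.Errorf("list pod sandboxes: %w", err)
    }
    podUIDBySandbox := make(map[string]string, len(sandboxes))
    for _, s := range sandboxes {
        podUIDBySandbox[s.ID] = s.PodUID
    }
    containers, err := rs.ListContainers(ctx)
    if err != nil {
        return nil, fmt.Errorf("list containers: %w", err)
    }
    containerToPod := make(map[string]string, len(containers))
    for _, c := range containers {
        uid, ok := podUIDBySandbox[c.PodSandboxID]
        if !ok {
            continue // no sandbox for this container; the real code logs and skips
        }
        containerToPod[c.ID] = uid
    }
    return containerToPod, nil
}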

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package cpumanager

 import (
+    "context"
     "fmt"
     "math"
     "sync"
@@ -42,7 +43,7 @@ import (
 type ActivePodsFunc func() []*v1.Pod

 type runtimeService interface {
-    UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error
+    UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
 }

 type policyName string
@@ -401,6 +402,7 @@ func (m *manager) removeStaleState() {
 }

 func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) {
+    ctx := context.Background()
     success = []reconciledContainer{}
     failure = []reconciledContainer{}
@@ -469,7 +471,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
             lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
             if !cset.Equals(lcset) {
                 klog.V(4).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
-                err = m.updateContainerCPUSet(containerID, cset)
+                err = m.updateContainerCPUSet(ctx, containerID, cset)
                 if err != nil {
                     klog.ErrorS(err, "ReconcileState: failed to update container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
                     failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
@@ -508,12 +510,13 @@ func findContainerStatusByName(status *v1.PodStatus, name string) (*v1.Container
     return nil, fmt.Errorf("unable to find status for container with name %v in pod status (it may not be running)", name)
 }

-func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet) error {
+func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error {
     // TODO: Consider adding a `ResourceConfigForContainer` helper in
     // helpers_linux.go similar to what exists for pods.
     // It would be better to pass the full container resources here instead of
     // this patch-like partial resources.
     return m.containerRuntime.UpdateContainerResources(
+        ctx,
         containerID,
         &runtimeapi.ContainerResources{
             Linux: &runtimeapi.LinuxContainerResources{
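The cpumanager declares its own one-method runtimeService interface (the memorymanager below does the same) instead of depending on the full CRI client, so widening the signature here touches only this consumer-side contract and its local fakes. A minimal sketch of the idiom with illustrative types:

package consumersketch

import "context"

// resources stands in for runtimeapi.ContainerResources.
type resources struct{ CPUSet string }

// runtimeService is a consumer-side interface: it names only the single CRI
// call this package uses, so a test fake is one method, not dozens.
type runtimeService interface {
    UpdateContainerResources(ctx context.Context, id string, r *resources) error
}

// fakeRuntime satisfies runtimeService for tests with a canned error.
type fakeRuntime struct{ err error }

func (f fakeRuntime) UpdateContainerResources(_ context.Context, _ string, _ *resources) error {
    return f.err
}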

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package cpumanager

 import (
+    "context"
     "fmt"
     "os"
     "reflect"
@@ -126,7 +127,7 @@ type mockRuntimeService struct {
     err error
 }

-func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error {
+func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
     return rt.err
 }

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package memorymanager

 import (
+    "context"
     "fmt"
     "sync"
@@ -43,7 +44,7 @@ const memoryManagerStateFileName = "memory_manager_state"
 type ActivePodsFunc func() []*v1.Pod

 type runtimeService interface {
-    UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error
+    UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
 }

 type sourcesReadyStub struct{}

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package memorymanager

 import (
+    "context"
     "fmt"
     "os"
     "reflect"
@@ -121,7 +122,7 @@ type mockRuntimeService struct {
     err error
 }

-func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error {
+func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
     return rt.err
 }

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package container

 import (
+    "context"
     "fmt"
     "time"
@@ -41,9 +42,9 @@ type GCPolicy struct {
 // Implementation is thread-compatible.
 type GC interface {
     // Garbage collect containers.
-    GarbageCollect() error
+    GarbageCollect(ctx context.Context) error
     // Deletes all unused containers, including containers belonging to pods that are terminated but not deleted
-    DeleteAllUnusedContainers() error
+    DeleteAllUnusedContainers(ctx context.Context) error
 }

 // SourcesReadyProvider knows how to determine if configuration sources are ready
@@ -77,11 +78,11 @@ func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider Sourc
     }, nil
 }

-func (cgc *realContainerGC) GarbageCollect() error {
-    return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
+func (cgc *realContainerGC) GarbageCollect(ctx context.Context) error {
+    return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
 }

-func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
+func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error {
     klog.InfoS("Attempting to delete unused containers")
-    return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
+    return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
 }
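With these signatures the caller owns the context, so canceling it can now stop in-flight GC work. A hedged sketch of a periodic collector loop driving the patched interface; the loop and names are illustrative, not kubelet's actual wiring:

package gcsketch

import (
    "context"
    "log"
    "time"
)

// GC matches the patched interface shape from this commit.
type GC interface {
    GarbageCollect(ctx context.Context) error
    DeleteAllUnusedContainers(ctx context.Context) error
}

// runGCLoop invokes GarbageCollect on a fixed period until ctx is canceled,
// so shutting down the caller also stops the underlying CRI calls.
func runGCLoop(ctx context.Context, gc GC, period time.Duration) {
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            if err := gc.GarbageCollect(ctx); err != nil {
                log.Printf("container garbage collection failed: %v", err)
            }
        }
    }
}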

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package container

 import (
+    "context"
     "encoding/json"
     "fmt"
     "hash/fnv"
@@ -39,13 +40,13 @@ import (
 // HandlerRunner runs a lifecycle handler for a container.
 type HandlerRunner interface {
-    Run(containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error)
+    Run(ctx context.Context, containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error)
 }

 // RuntimeHelper wraps kubelet to make container runtime
 // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
 type RuntimeHelper interface {
-    GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
+    GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
     GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
     // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
     // of a pod.

View File

@@ -70,7 +70,7 @@ type Runtime interface {
     Type() string

     // Version returns the version information of the container runtime.
-    Version() (Version, error)
+    Version(ctx context.Context) (Version, error)

     // APIVersion returns the cached API version information of the container
     // runtime. Implementation is expected to update this cache periodically.
@@ -79,11 +79,11 @@ type Runtime interface {
     APIVersion() (Version, error)
     // Status returns the status of the runtime. An error is returned if the Status
     // function itself fails, nil otherwise.
-    Status() (*RuntimeStatus, error)
+    Status(ctx context.Context) (*RuntimeStatus, error)
     // GetPods returns a list of containers grouped by pods. The boolean parameter
     // specifies whether the runtime returns all containers including those already
     // exited and dead containers (used for garbage collection).
-    GetPods(all bool) ([]*Pod, error)
+    GetPods(ctx context.Context, all bool) ([]*Pod, error)
     // GarbageCollect removes dead containers using the specified container gc policy
     // If allSourcesReady is not true, it means that kubelet doesn't have the
     // complete list of pods from all available sources (e.g., apiserver, http,
@@ -93,18 +93,18 @@ type Runtime interface {
     // that are terminated, but not deleted will be evicted. Otherwise, only deleted pods
     // will be GC'd.
     // TODO: Revisit this method and make it cleaner.
-    GarbageCollect(gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
+    GarbageCollect(ctx context.Context, gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
     // SyncPod syncs the running pod into the desired pod.
-    SyncPod(pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
+    SyncPod(ctx context.Context, pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
     // KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
     // TODO(random-liu): Return PodSyncResult in KillPod.
     // gracePeriodOverride if specified allows the caller to override the pod default grace period.
     // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
     // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
-    KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
+    KillPod(ctx context.Context, pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
     // GetPodStatus retrieves the status of the pod, including the
     // information of all containers in the pod that are visible in Runtime.
-    GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
+    GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*PodStatus, error)
     // TODO(vmarmol): Unify pod and containerID args.
     // GetContainerLogs returns logs of a specific container. By
     // default, it returns a snapshot of the container log. Set 'follow' to true to
@@ -112,53 +112,53 @@ type Runtime interface {
     // "100" or "all") to tail the log.
     GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error)
     // DeleteContainer deletes a container. If the container is still running, an error is returned.
-    DeleteContainer(containerID ContainerID) error
+    DeleteContainer(ctx context.Context, containerID ContainerID) error
     // ImageService provides methods to image-related methods.
     ImageService
     // UpdatePodCIDR sends a new podCIDR to the runtime.
     // This method just proxies a new runtimeConfig with the updated
     // CIDR value down to the runtime shim.
-    UpdatePodCIDR(podCIDR string) error
+    UpdatePodCIDR(ctx context.Context, podCIDR string) error
     // CheckpointContainer tells the runtime to checkpoint a container
     // and store the resulting archive to the checkpoint directory.
-    CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error
+    CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error
 }

 // StreamingRuntime is the interface implemented by runtimes that handle the serving of the
 // streaming calls (exec/attach/port-forward) themselves. In this case, Kubelet should redirect to
 // the runtime server.
 type StreamingRuntime interface {
-    GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
+    GetExec(ctx context.Context, id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
-    GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
+    GetAttach(ctx context.Context, id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
-    GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
+    GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
 }

 // ImageService interfaces allows to work with image service.
 type ImageService interface {
     // PullImage pulls an image from the network to local storage using the supplied
     // secrets if necessary. It returns a reference (digest or ID) to the pulled image.
-    PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
+    PullImage(ctx context.Context, image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
     // GetImageRef gets the reference (digest or ID) of the image which has already been in
     // the local storage. It returns ("", nil) if the image isn't in the local storage.
-    GetImageRef(image ImageSpec) (string, error)
+    GetImageRef(ctx context.Context, image ImageSpec) (string, error)
     // ListImages gets all images currently on the machine.
-    ListImages() ([]Image, error)
+    ListImages(ctx context.Context) ([]Image, error)
     // RemoveImage removes the specified image.
-    RemoveImage(image ImageSpec) error
+    RemoveImage(ctx context.Context, image ImageSpec) error
     // ImageStats returns Image statistics.
-    ImageStats() (*ImageStats, error)
+    ImageStats(ctx context.Context) (*ImageStats, error)
 }

 // Attacher interface allows to attach a container.
 type Attacher interface {
-    AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
+    AttachContainer(ctx context.Context, id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
 }

 // CommandRunner interface allows to run command in a container.
 type CommandRunner interface {
     // RunInContainer synchronously executes the command in the container, and returns the output.
     // If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
-    RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
+    RunInContainer(ctx context.Context, id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
 }

 // Pod is a group of containers.
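GetContainerLogs already took a context; this hunk brings the rest of Runtime and StreamingRuntime in line, which means any runtime call can now be bounded by a deadline. A minimal usage sketch under that assumption, written against the patched interface:

package runtimesketch

import (
    "context"
    "time"

    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// listPodsWithDeadline bounds a single Runtime call; if the CRI endpoint
// hangs, the call returns a context error instead of blocking forever.
func listPodsWithDeadline(rt kubecontainer.Runtime) ([]*kubecontainer.Pod, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    // true: include already-exited and dead containers (the GC path).
    return rt.GetPods(ctx, true)
}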

View File

@@ -18,6 +18,7 @@ limitations under the License.
 package container

 import (
+    "context"
     "sync"
     "time"
 )
@@ -29,12 +30,12 @@ var (
 // RuntimeCache is in interface for obtaining cached Pods.
 type RuntimeCache interface {
-    GetPods() ([]*Pod, error)
-    ForceUpdateIfOlder(time.Time) error
+    GetPods(context.Context) ([]*Pod, error)
+    ForceUpdateIfOlder(context.Context, time.Time) error
 }

 type podsGetter interface {
-    GetPods(bool) ([]*Pod, error)
+    GetPods(context.Context, bool) ([]*Pod, error)
 }

 // NewRuntimeCache creates a container runtime cache.
@@ -60,28 +61,28 @@ type runtimeCache struct {
 // GetPods returns the cached pods if they are not outdated; otherwise, it
 // retrieves the latest pods and return them.
-func (r *runtimeCache) GetPods() ([]*Pod, error) {
+func (r *runtimeCache) GetPods(ctx context.Context) ([]*Pod, error) {
     r.Lock()
     defer r.Unlock()
     if time.Since(r.cacheTime) > defaultCachePeriod {
-        if err := r.updateCache(); err != nil {
+        if err := r.updateCache(ctx); err != nil {
             return nil, err
         }
     }
     return r.pods, nil
 }

-func (r *runtimeCache) ForceUpdateIfOlder(minExpectedCacheTime time.Time) error {
+func (r *runtimeCache) ForceUpdateIfOlder(ctx context.Context, minExpectedCacheTime time.Time) error {
     r.Lock()
     defer r.Unlock()
     if r.cacheTime.Before(minExpectedCacheTime) {
-        return r.updateCache()
+        return r.updateCache(ctx)
     }
     return nil
 }

-func (r *runtimeCache) updateCache() error {
-    pods, timestamp, err := r.getPodsWithTimestamp()
+func (r *runtimeCache) updateCache(ctx context.Context) error {
+    pods, timestamp, err := r.getPodsWithTimestamp(ctx)
     if err != nil {
         return err
     }
@@ -90,9 +91,9 @@ func (r *runtimeCache) updateCache() error {
 }

 // getPodsWithTimestamp records a timestamp and retrieves pods from the getter.
-func (r *runtimeCache) getPodsWithTimestamp() ([]*Pod, time.Time, error) {
+func (r *runtimeCache) getPodsWithTimestamp(ctx context.Context) ([]*Pod, time.Time, error) {
     // Always record the timestamp before getting the pods to avoid stale pods.
     timestamp := time.Now()
-    pods, err := r.getter.GetPods(false)
+    pods, err := r.getter.GetPods(ctx, false)
     return pods, timestamp, err
 }
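A usage sketch of the patched cache, assuming the signatures above: refresh-if-stale and read share one caller context, so the underlying GetPods(ctx, false) against the runtime is cancelable:

package cachesketch

import (
    "context"
    "time"

    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// podsNoOlderThan forces the cache to refresh if its snapshot predates
// cutoff, then reads through it; both steps carry the caller's ctx.
func podsNoOlderThan(ctx context.Context, rc kubecontainer.RuntimeCache, cutoff time.Time) ([]*kubecontainer.Pod, error) {
    if err := rc.ForceUpdateIfOlder(ctx, cutoff); err != nil {
        return nil, err
    }
    return rc.GetPods(ctx)
}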

View File

@@ -16,6 +16,8 @@ limitations under the License.
 package container

+import "context"
+
 // TestRuntimeCache embeds runtimeCache with some additional methods for testing.
 // It must be declared in the container package to have visibility to runtimeCache.
 // It cannot be in a "..._test.go" file in order for runtime_cache_test.go to have cross-package visibility to it.
@@ -28,7 +30,7 @@ type TestRuntimeCache struct {
 func (r *TestRuntimeCache) UpdateCacheWithLock() error {
     r.Lock()
     defer r.Unlock()
-    return r.updateCache()
+    return r.updateCache(context.Background())
 }

 // GetCachedPods returns the cached pods.

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package container_test

 import (
+    "context"
     "reflect"
     "testing"
     "time"
@@ -37,11 +38,12 @@ func comparePods(t *testing.T, expected []*ctest.FakePod, actual []*Pod) {
 }

 func TestGetPods(t *testing.T) {
+    ctx := context.Background()
     runtime := &ctest.FakeRuntime{}
     expected := []*ctest.FakePod{{Pod: &Pod{ID: "1111"}}, {Pod: &Pod{ID: "2222"}}, {Pod: &Pod{ID: "3333"}}}
     runtime.PodList = expected
     cache := NewTestRuntimeCache(runtime)
-    actual, err := cache.GetPods()
+    actual, err := cache.GetPods(ctx)
     if err != nil {
         t.Errorf("unexpected error %v", err)
     }
@@ -50,6 +52,7 @@ func TestGetPods(t *testing.T) {
 }

 func TestForceUpdateIfOlder(t *testing.T) {
+    ctx := context.Background()
     runtime := &ctest.FakeRuntime{}
     cache := NewTestRuntimeCache(runtime)
@@ -63,12 +66,12 @@ func TestForceUpdateIfOlder(t *testing.T) {
     runtime.PodList = newpods

     // An older timestamp should not force an update.
-    cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute))
+    cache.ForceUpdateIfOlder(ctx, time.Now().Add(-20*time.Minute))
     actual := cache.GetCachedPods()
     comparePods(t, oldpods, actual)

     // A newer timestamp should force an update.
-    cache.ForceUpdateIfOlder(time.Now().Add(20 * time.Second))
+    cache.ForceUpdateIfOlder(ctx, time.Now().Add(20*time.Second))
     actual = cache.GetCachedPods()
     comparePods(t, newpods, actual)
 }

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package testing

 import (
+    "context"
     "time"

     "k8s.io/apimachinery/pkg/types"
@@ -32,7 +33,7 @@ func NewFakeCache(runtime container.Runtime) container.Cache {
 }

 func (c *fakeCache) Get(id types.UID) (*container.PodStatus, error) {
-    return c.runtime.GetPodStatus(id, "", "")
+    return c.runtime.GetPodStatus(context.Background(), id, "", "")
 }

 func (c *fakeCache) GetNewerThan(id types.UID, minTime time.Time) (*container.PodStatus, error) {

View File

@@ -91,7 +91,7 @@ func (fv *FakeVersion) Compare(other string) (int, error) {
 }

 type podsGetter interface {
-    GetPods(bool) ([]*kubecontainer.Pod, error)
+    GetPods(context.Context, bool) ([]*kubecontainer.Pod, error)
 }

 type FakeRuntimeCache struct {
@@ -102,11 +102,11 @@ func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
     return &FakeRuntimeCache{getter}
 }

-func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) {
-    return f.getter.GetPods(false)
+func (f *FakeRuntimeCache) GetPods(ctx context.Context) ([]*kubecontainer.Pod, error) {
+    return f.getter.GetPods(ctx, false)
 }

-func (f *FakeRuntimeCache) ForceUpdateIfOlder(time.Time) error {
+func (f *FakeRuntimeCache) ForceUpdateIfOlder(context.Context, time.Time) error {
     return nil
 }
@@ -132,7 +132,7 @@ func (f *FakeRuntime) ClearCalls() {
 }

 // UpdatePodCIDR fulfills the cri interface.
-func (f *FakeRuntime) UpdatePodCIDR(c string) error {
+func (f *FakeRuntime) UpdatePodCIDR(_ context.Context, c string) error {
     return nil
 }
@@ -179,7 +179,7 @@ func (f *FakeRuntime) Type() string {
     return f.RuntimeType
 }

-func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
+func (f *FakeRuntime) Version(_ context.Context) (kubecontainer.Version, error) {
     f.Lock()
     defer f.Unlock()
@@ -195,7 +195,7 @@ func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
     return &FakeVersion{Version: f.APIVersionInfo}, f.Err
 }

-func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
+func (f *FakeRuntime) Status(_ context.Context) (*kubecontainer.RuntimeStatus, error) {
     f.Lock()
     defer f.Unlock()
@@ -203,7 +203,7 @@ func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
     return f.RuntimeStatus, f.StatusErr
 }

-func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
+func (f *FakeRuntime) GetPods(_ context.Context, all bool) ([]*kubecontainer.Pod, error) {
     f.Lock()
     defer f.Unlock()
@@ -222,7 +222,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
     return pods, f.Err
 }

-func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
+func (f *FakeRuntime) SyncPod(_ context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
     f.Lock()
     defer f.Unlock()
@@ -238,7 +238,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Se
     return
 }

-func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
+func (f *FakeRuntime) KillPod(_ context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
     f.Lock()
     defer f.Unlock()
@@ -276,7 +276,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er
     return f.Err
 }

-func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
+func (f *FakeRuntime) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
     f.Lock()
     defer f.Unlock()
@@ -293,7 +293,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container
     return f.Err
 }

-func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (f *FakeRuntime) PullImage(_ context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
     f.Lock()
     defer f.Unlock()
@@ -308,7 +308,7 @@ func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.
     return image.Image, f.Err
 }

-func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
+func (f *FakeRuntime) GetImageRef(_ context.Context, image kubecontainer.ImageSpec) (string, error) {
     f.Lock()
     defer f.Unlock()
@@ -321,7 +321,7 @@ func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error)
     return "", f.InspectErr
 }

-func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
+func (f *FakeRuntime) ListImages(_ context.Context) ([]kubecontainer.Image, error) {
     f.Lock()
     defer f.Unlock()
@@ -329,7 +329,7 @@ func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
     return f.ImageList, f.Err
 }

-func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
+func (f *FakeRuntime) RemoveImage(_ context.Context, image kubecontainer.ImageSpec) error {
     f.Lock()
     defer f.Unlock()
@@ -346,7 +346,7 @@ func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
     return f.Err
 }

-func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (f *FakeRuntime) GarbageCollect(_ context.Context, gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
     f.Lock()
     defer f.Unlock()
@@ -354,7 +354,7 @@ func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool
     return f.Err
 }

-func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error {
+func (f *FakeRuntime) DeleteContainer(_ context.Context, containerID kubecontainer.ContainerID) error {
     f.Lock()
     defer f.Unlock()
@@ -362,7 +362,7 @@ func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) err
     return f.Err
 }

-func (f *FakeRuntime) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error {
+func (f *FakeRuntime) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error {
     f.Lock()
     defer f.Unlock()
@@ -370,7 +370,7 @@ func (f *FakeRuntime) CheckpointContainer(options *runtimeapi.CheckpointContaine
     return f.Err
 }

-func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
+func (f *FakeRuntime) ImageStats(_ context.Context) (*kubecontainer.ImageStats, error) {
     f.Lock()
     defer f.Unlock()
@@ -378,7 +378,7 @@ func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
     return nil, f.Err
 }

-func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetExec(_ context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
     f.Lock()
     defer f.Unlock()
@@ -386,7 +386,7 @@ func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []strin
     return &url.URL{Host: FakeHost}, f.Err
 }

-func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetAttach(_ context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
     f.Lock()
     defer f.Unlock()
@@ -394,7 +394,7 @@ func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, st
     return &url.URL{Host: FakeHost}, f.Err
 }

-func (f *FakeStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
+func (f *FakeStreamingRuntime) GetPortForward(_ context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
     f.Lock()
     defer f.Unlock()
@@ -414,7 +414,7 @@ type FakeContainerCommandRunner struct {
 var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{}

-func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
+func (f *FakeContainerCommandRunner) RunInContainer(_ context.Context, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
     // record invoked values
     f.ContainerID = containerID
     f.Cmd = cmd

View File

@@ -17,7 +17,9 @@ limitations under the License.
 package testing

 import (
-    "k8s.io/api/core/v1"
+    "context"
+
+    v1 "k8s.io/api/core/v1"
     kubetypes "k8s.io/apimachinery/pkg/types"
     runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -34,7 +36,7 @@ type FakeRuntimeHelper struct {
     Err error
 }

-func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
+func (f *FakeRuntimeHelper) GenerateRunContainerOptions(_ context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
     var opts kubecontainer.RunContainerOptions
     if len(container.TerminationMessagePath) != 0 {
         opts.PodContainerDir = f.PodContainerDir

View File

@@ -21,6 +21,7 @@ limitations under the License.
 package testing

 import (
+    context "context"
     reflect "reflect"
     time "time"
@@ -52,32 +53,32 @@ func (m *MockRuntimeCache) EXPECT() *MockRuntimeCacheMockRecorder {
 }

 // ForceUpdateIfOlder mocks base method.
-func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 time.Time) error {
+func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 context.Context, arg1 time.Time) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0)
+    ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0, arg1)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // ForceUpdateIfOlder indicates an expected call of ForceUpdateIfOlder.
-func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0 interface{}) *gomock.Call {
+func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0, arg1 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0, arg1)
 }

 // GetPods mocks base method.
-func (m *MockRuntimeCache) GetPods() ([]*container.Pod, error) {
+func (m *MockRuntimeCache) GetPods(arg0 context.Context) ([]*container.Pod, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GetPods")
+    ret := m.ctrl.Call(m, "GetPods", arg0)
     ret0, _ := ret[0].([]*container.Pod)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // GetPods indicates an expected call of GetPods.
-func (mr *MockRuntimeCacheMockRecorder) GetPods() *gomock.Call {
+func (mr *MockRuntimeCacheMockRecorder) GetPods(arg0 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods))
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods), arg0)
 }

 // MockpodsGetter is a mock of podsGetter interface.
@@ -104,16 +105,16 @@ func (m *MockpodsGetter) EXPECT() *MockpodsGetterMockRecorder {
 }

 // GetPods mocks base method.
-func (m *MockpodsGetter) GetPods(arg0 bool) ([]*container.Pod, error) {
+func (m *MockpodsGetter) GetPods(arg0 context.Context, arg1 bool) ([]*container.Pod, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GetPods", arg0)
+    ret := m.ctrl.Call(m, "GetPods", arg0, arg1)
     ret0, _ := ret[0].([]*container.Pod)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // GetPods indicates an expected call of GetPods.
-func (mr *MockpodsGetterMockRecorder) GetPods(arg0 interface{}) *gomock.Call {
+func (mr *MockpodsGetterMockRecorder) GetPods(arg0, arg1 interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0, arg1)
 }
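Because every mocked method now takes a context as its first argument, hand-written expectations need one more matcher; gomock.Any() is the usual choice when a test does not care which context arrives. A hedged sketch of the call-site change, assuming the generated NewMockRuntimeCache constructor in this package:

package testing

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"
)

func TestGetPodsExpectation(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    mockCache := NewMockRuntimeCache(ctrl)
    // Before this commit the expectation took no arguments:
    //   mockCache.EXPECT().GetPods()
    // The plumbed context is matched with gomock.Any() so any caller
    // context satisfies the expectation.
    mockCache.EXPECT().GetPods(gomock.Any()).Return(nil, nil)

    if _, err := mockCache.GetPods(context.Background()); err != nil {
        t.Fatalf("GetPods: %v", err)
    }
}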

View File

@@ -127,45 +127,45 @@ func (mr *MockRuntimeMockRecorder) APIVersion() *gomock.Call {
 }

 // CheckpointContainer mocks base method.
-func (m *MockRuntime) CheckpointContainer(options *v10.CheckpointContainerRequest) error {
+func (m *MockRuntime) CheckpointContainer(ctx context.Context, options *v10.CheckpointContainerRequest) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "CheckpointContainer", options)
+    ret := m.ctrl.Call(m, "CheckpointContainer", ctx, options)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // CheckpointContainer indicates an expected call of CheckpointContainer.
-func (mr *MockRuntimeMockRecorder) CheckpointContainer(options interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) CheckpointContainer(ctx, options interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), options)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), ctx, options)
 }

 // DeleteContainer mocks base method.
-func (m *MockRuntime) DeleteContainer(containerID container.ContainerID) error {
+func (m *MockRuntime) DeleteContainer(ctx context.Context, containerID container.ContainerID) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "DeleteContainer", containerID)
+    ret := m.ctrl.Call(m, "DeleteContainer", ctx, containerID)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // DeleteContainer indicates an expected call of DeleteContainer.
-func (mr *MockRuntimeMockRecorder) DeleteContainer(containerID interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) DeleteContainer(ctx, containerID interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), containerID)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), ctx, containerID)
 }

 // GarbageCollect mocks base method.
-func (m *MockRuntime) GarbageCollect(gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error {
+func (m *MockRuntime) GarbageCollect(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GarbageCollect", gcPolicy, allSourcesReady, evictNonDeletedPods)
+    ret := m.ctrl.Call(m, "GarbageCollect", ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // GarbageCollect indicates an expected call of GarbageCollect.
-func (mr *MockRuntimeMockRecorder) GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), gcPolicy, allSourcesReady, evictNonDeletedPods)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
 }

 // GetContainerLogs mocks base method.
@@ -183,150 +183,150 @@ func (mr *MockRuntimeMockRecorder) GetContainerLogs(ctx, pod, containerID, logOp
 }

 // GetImageRef mocks base method.
-func (m *MockRuntime) GetImageRef(image container.ImageSpec) (string, error) {
+func (m *MockRuntime) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GetImageRef", image)
+    ret := m.ctrl.Call(m, "GetImageRef", ctx, image)
     ret0, _ := ret[0].(string)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // GetImageRef indicates an expected call of GetImageRef.
-func (mr *MockRuntimeMockRecorder) GetImageRef(image interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), image)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), ctx, image)
 }

 // GetPodStatus mocks base method.
-func (m *MockRuntime) GetPodStatus(uid types.UID, name, namespace string) (*container.PodStatus, error) {
+func (m *MockRuntime) GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*container.PodStatus, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GetPodStatus", uid, name, namespace)
+    ret := m.ctrl.Call(m, "GetPodStatus", ctx, uid, name, namespace)
     ret0, _ := ret[0].(*container.PodStatus)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // GetPodStatus indicates an expected call of GetPodStatus.
-func (mr *MockRuntimeMockRecorder) GetPodStatus(uid, name, namespace interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) GetPodStatus(ctx, uid, name, namespace interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), uid, name, namespace)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), ctx, uid, name, namespace)
 }

 // GetPods mocks base method.
-func (m *MockRuntime) GetPods(all bool) ([]*container.Pod, error) {
+func (m *MockRuntime) GetPods(ctx context.Context, all bool) ([]*container.Pod, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "GetPods", all)
+    ret := m.ctrl.Call(m, "GetPods", ctx, all)
     ret0, _ := ret[0].([]*container.Pod)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // GetPods indicates an expected call of GetPods.
-func (mr *MockRuntimeMockRecorder) GetPods(all interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) GetPods(ctx, all interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), all)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), ctx, all)
 }

 // ImageStats mocks base method.
-func (m *MockRuntime) ImageStats() (*container.ImageStats, error) {
+func (m *MockRuntime) ImageStats(ctx context.Context) (*container.ImageStats, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ImageStats")
+    ret := m.ctrl.Call(m, "ImageStats", ctx)
     ret0, _ := ret[0].(*container.ImageStats)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // ImageStats indicates an expected call of ImageStats.
-func (mr *MockRuntimeMockRecorder) ImageStats() *gomock.Call {
+func (mr *MockRuntimeMockRecorder) ImageStats(ctx interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats))
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats), ctx)
 }

 // KillPod mocks base method.
-func (m *MockRuntime) KillPod(pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error {
+func (m *MockRuntime) KillPod(ctx context.Context, pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "KillPod", pod, runningPod, gracePeriodOverride)
+    ret := m.ctrl.Call(m, "KillPod", ctx, pod, runningPod, gracePeriodOverride)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // KillPod indicates an expected call of KillPod.
-func (mr *MockRuntimeMockRecorder) KillPod(pod, runningPod, gracePeriodOverride interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) KillPod(ctx, pod, runningPod, gracePeriodOverride interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), pod, runningPod, gracePeriodOverride)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), ctx, pod, runningPod, gracePeriodOverride)
 }

 // ListImages mocks base method.
-func (m *MockRuntime) ListImages() ([]container.Image, error) {
+func (m *MockRuntime) ListImages(ctx context.Context) ([]container.Image, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "ListImages")
+    ret := m.ctrl.Call(m, "ListImages", ctx)
     ret0, _ := ret[0].([]container.Image)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // ListImages indicates an expected call of ListImages.
-func (mr *MockRuntimeMockRecorder) ListImages() *gomock.Call {
+func (mr *MockRuntimeMockRecorder) ListImages(ctx interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages))
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages), ctx)
 }

 // PullImage mocks base method.
-func (m *MockRuntime) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
+func (m *MockRuntime) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig)
+    ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig)
     ret0, _ := ret[0].(string)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // PullImage indicates an expected call of PullImage.
-func (mr *MockRuntimeMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), image, pullSecrets, podSandboxConfig)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig)
 }

 // RemoveImage mocks base method.
-func (m *MockRuntime) RemoveImage(image container.ImageSpec) error {
+func (m *MockRuntime) RemoveImage(ctx context.Context, image container.ImageSpec) error {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "RemoveImage", image)
+    ret := m.ctrl.Call(m, "RemoveImage", ctx, image)
     ret0, _ := ret[0].(error)
     return ret0
 }

 // RemoveImage indicates an expected call of RemoveImage.
-func (mr *MockRuntimeMockRecorder) RemoveImage(image interface{}) *gomock.Call {
+func (mr *MockRuntimeMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), image)
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), ctx, image)
 }

 // Status mocks base method.
-func (m *MockRuntime) Status() (*container.RuntimeStatus, error) {
+func (m *MockRuntime) Status(ctx context.Context) (*container.RuntimeStatus, error) {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "Status")
+    ret := m.ctrl.Call(m, "Status", ctx)
     ret0, _ := ret[0].(*container.RuntimeStatus)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }

 // Status indicates an expected call of Status.
-func (mr *MockRuntimeMockRecorder) Status() *gomock.Call {
+func (mr *MockRuntimeMockRecorder) Status(ctx interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status))
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status), ctx)
 }

 // SyncPod mocks base method.
-func (m *MockRuntime) SyncPod(pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult {
+func (m *MockRuntime) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult {
     m.ctrl.T.Helper()
-    ret := m.ctrl.Call(m, "SyncPod", pod, podStatus, pullSecrets, backOff)
+    ret := m.ctrl.Call(m, "SyncPod", ctx, pod, podStatus, pullSecrets, backOff)
     ret0, _ := ret[0].(container.PodSyncResult)
     return ret0
 }

 // SyncPod indicates an expected call of SyncPod.
func (mr *MockRuntimeMockRecorder) SyncPod(pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call { func (mr *MockRuntimeMockRecorder) SyncPod(ctx, pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), pod, podStatus, pullSecrets, backOff) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), ctx, pod, podStatus, pullSecrets, backOff)
} }
// Type mocks base method. // Type mocks base method.
@ -344,32 +344,32 @@ func (mr *MockRuntimeMockRecorder) Type() *gomock.Call {
} }
// UpdatePodCIDR mocks base method. // UpdatePodCIDR mocks base method.
func (m *MockRuntime) UpdatePodCIDR(podCIDR string) error { func (m *MockRuntime) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdatePodCIDR", podCIDR) ret := m.ctrl.Call(m, "UpdatePodCIDR", ctx, podCIDR)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
return ret0 return ret0
} }
// UpdatePodCIDR indicates an expected call of UpdatePodCIDR. // UpdatePodCIDR indicates an expected call of UpdatePodCIDR.
func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(podCIDR interface{}) *gomock.Call { func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(ctx, podCIDR interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), podCIDR) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), ctx, podCIDR)
} }
// Version mocks base method. // Version mocks base method.
func (m *MockRuntime) Version() (container.Version, error) { func (m *MockRuntime) Version(ctx context.Context) (container.Version, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Version") ret := m.ctrl.Call(m, "Version", ctx)
ret0, _ := ret[0].(container.Version) ret0, _ := ret[0].(container.Version)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// Version indicates an expected call of Version. // Version indicates an expected call of Version.
func (mr *MockRuntimeMockRecorder) Version() *gomock.Call { func (mr *MockRuntimeMockRecorder) Version(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version), ctx)
} }
// MockStreamingRuntime is a mock of StreamingRuntime interface. // MockStreamingRuntime is a mock of StreamingRuntime interface.
@ -396,48 +396,48 @@ func (m *MockStreamingRuntime) EXPECT() *MockStreamingRuntimeMockRecorder {
} }
// GetAttach mocks base method. // GetAttach mocks base method.
func (m *MockStreamingRuntime) GetAttach(id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { func (m *MockStreamingRuntime) GetAttach(ctx context.Context, id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAttach", id, stdin, stdout, stderr, tty) ret := m.ctrl.Call(m, "GetAttach", ctx, id, stdin, stdout, stderr, tty)
ret0, _ := ret[0].(*url.URL) ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// GetAttach indicates an expected call of GetAttach. // GetAttach indicates an expected call of GetAttach.
func (mr *MockStreamingRuntimeMockRecorder) GetAttach(id, stdin, stdout, stderr, tty interface{}) *gomock.Call { func (mr *MockStreamingRuntimeMockRecorder) GetAttach(ctx, id, stdin, stdout, stderr, tty interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), id, stdin, stdout, stderr, tty) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), ctx, id, stdin, stdout, stderr, tty)
} }
// GetExec mocks base method. // GetExec mocks base method.
func (m *MockStreamingRuntime) GetExec(id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { func (m *MockStreamingRuntime) GetExec(ctx context.Context, id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExec", id, cmd, stdin, stdout, stderr, tty) ret := m.ctrl.Call(m, "GetExec", ctx, id, cmd, stdin, stdout, stderr, tty)
ret0, _ := ret[0].(*url.URL) ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// GetExec indicates an expected call of GetExec. // GetExec indicates an expected call of GetExec.
func (mr *MockStreamingRuntimeMockRecorder) GetExec(id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call { func (mr *MockStreamingRuntimeMockRecorder) GetExec(ctx, id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), id, cmd, stdin, stdout, stderr, tty) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), ctx, id, cmd, stdin, stdout, stderr, tty)
} }
// GetPortForward mocks base method. // GetPortForward mocks base method.
func (m *MockStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { func (m *MockStreamingRuntime) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPortForward", podName, podNamespace, podUID, ports) ret := m.ctrl.Call(m, "GetPortForward", ctx, podName, podNamespace, podUID, ports)
ret0, _ := ret[0].(*url.URL) ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// GetPortForward indicates an expected call of GetPortForward. // GetPortForward indicates an expected call of GetPortForward.
func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(podName, podNamespace, podUID, ports interface{}) *gomock.Call { func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(ctx, podName, podNamespace, podUID, ports interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), podName, podNamespace, podUID, ports) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), ctx, podName, podNamespace, podUID, ports)
} }
// MockImageService is a mock of ImageService interface. // MockImageService is a mock of ImageService interface.
@ -464,77 +464,77 @@ func (m *MockImageService) EXPECT() *MockImageServiceMockRecorder {
} }
// GetImageRef mocks base method. // GetImageRef mocks base method.
func (m *MockImageService) GetImageRef(image container.ImageSpec) (string, error) { func (m *MockImageService) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetImageRef", image) ret := m.ctrl.Call(m, "GetImageRef", ctx, image)
ret0, _ := ret[0].(string) ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// GetImageRef indicates an expected call of GetImageRef. // GetImageRef indicates an expected call of GetImageRef.
func (mr *MockImageServiceMockRecorder) GetImageRef(image interface{}) *gomock.Call { func (mr *MockImageServiceMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), image) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), ctx, image)
} }
// ImageStats mocks base method. // ImageStats mocks base method.
func (m *MockImageService) ImageStats() (*container.ImageStats, error) { func (m *MockImageService) ImageStats(ctx context.Context) (*container.ImageStats, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageStats") ret := m.ctrl.Call(m, "ImageStats", ctx)
ret0, _ := ret[0].(*container.ImageStats) ret0, _ := ret[0].(*container.ImageStats)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// ImageStats indicates an expected call of ImageStats. // ImageStats indicates an expected call of ImageStats.
func (mr *MockImageServiceMockRecorder) ImageStats() *gomock.Call { func (mr *MockImageServiceMockRecorder) ImageStats(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats), ctx)
} }
// ListImages mocks base method. // ListImages mocks base method.
func (m *MockImageService) ListImages() ([]container.Image, error) { func (m *MockImageService) ListImages(ctx context.Context) ([]container.Image, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListImages") ret := m.ctrl.Call(m, "ListImages", ctx)
ret0, _ := ret[0].([]container.Image) ret0, _ := ret[0].([]container.Image)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// ListImages indicates an expected call of ListImages. // ListImages indicates an expected call of ListImages.
func (mr *MockImageServiceMockRecorder) ListImages() *gomock.Call { func (mr *MockImageServiceMockRecorder) ListImages(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages), ctx)
} }
// PullImage mocks base method. // PullImage mocks base method.
func (m *MockImageService) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) { func (m *MockImageService) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig) ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig)
ret0, _ := ret[0].(string) ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// PullImage indicates an expected call of PullImage. // PullImage indicates an expected call of PullImage.
func (mr *MockImageServiceMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call { func (mr *MockImageServiceMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), image, pullSecrets, podSandboxConfig) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig)
} }
// RemoveImage mocks base method. // RemoveImage mocks base method.
func (m *MockImageService) RemoveImage(image container.ImageSpec) error { func (m *MockImageService) RemoveImage(ctx context.Context, image container.ImageSpec) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveImage", image) ret := m.ctrl.Call(m, "RemoveImage", ctx, image)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
return ret0 return ret0
} }
// RemoveImage indicates an expected call of RemoveImage. // RemoveImage indicates an expected call of RemoveImage.
func (mr *MockImageServiceMockRecorder) RemoveImage(image interface{}) *gomock.Call { func (mr *MockImageServiceMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), image) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), ctx, image)
} }
// MockAttacher is a mock of Attacher interface. // MockAttacher is a mock of Attacher interface.
@ -561,17 +561,17 @@ func (m *MockAttacher) EXPECT() *MockAttacherMockRecorder {
} }
// AttachContainer mocks base method. // AttachContainer mocks base method.
func (m *MockAttacher) AttachContainer(id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (m *MockAttacher) AttachContainer(ctx context.Context, id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AttachContainer", id, stdin, stdout, stderr, tty, resize) ret := m.ctrl.Call(m, "AttachContainer", ctx, id, stdin, stdout, stderr, tty, resize)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
return ret0 return ret0
} }
// AttachContainer indicates an expected call of AttachContainer. // AttachContainer indicates an expected call of AttachContainer.
func (mr *MockAttacherMockRecorder) AttachContainer(id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call { func (mr *MockAttacherMockRecorder) AttachContainer(ctx, id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), id, stdin, stdout, stderr, tty, resize) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), ctx, id, stdin, stdout, stderr, tty, resize)
} }
// MockCommandRunner is a mock of CommandRunner interface. // MockCommandRunner is a mock of CommandRunner interface.
@ -598,16 +598,16 @@ func (m *MockCommandRunner) EXPECT() *MockCommandRunnerMockRecorder {
} }
// RunInContainer mocks base method. // RunInContainer mocks base method.
func (m *MockCommandRunner) RunInContainer(id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { func (m *MockCommandRunner) RunInContainer(ctx context.Context, id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunInContainer", id, cmd, timeout) ret := m.ctrl.Call(m, "RunInContainer", ctx, id, cmd, timeout)
ret0, _ := ret[0].([]byte) ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// RunInContainer indicates an expected call of RunInContainer. // RunInContainer indicates an expected call of RunInContainer.
func (mr *MockCommandRunnerMockRecorder) RunInContainer(id, cmd, timeout interface{}) *gomock.Call { func (mr *MockCommandRunnerMockRecorder) RunInContainer(ctx, id, cmd, timeout interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), id, cmd, timeout) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), ctx, id, cmd, timeout)
} }
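Every mocked method above now records ctx like any other argument, so existing tests that set expectations on these mocks must add a leading matcher. A minimal sketch of the updated call pattern, assuming the generated mocks are imported from k8s.io/kubernetes/pkg/kubelet/container/testing (the test name and import alias below are illustrative, not part of this commit):

package testing_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)

func TestListImagesTakesContext(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	runtime := ctest.NewMockRuntime(ctrl)
	// gomock.Any() matches whichever ctx the code under test threads
	// through; expectations written without the extra argument no longer
	// compile against the regenerated recorder.
	runtime.EXPECT().ListImages(gomock.Any()).Return(nil, nil)

	if _, err := runtime.ListImages(context.Background()); err != nil {
		t.Fatal(err)
	}
}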

View File

@@ -24,7 +24,7 @@ import (
 // ListImages lists existing images.
 func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesRequest) (*kubeapi.ListImagesResponse, error) {
-	images, err := f.ImageService.ListImages(req.Filter)
+	images, err := f.ImageService.ListImages(ctx, req.Filter)
 	if err != nil {
 		return nil, err
 	}
@@ -38,7 +38,7 @@ func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesR
 // present, returns a response with ImageStatusResponse.Image set to
 // nil.
 func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatusRequest) (*kubeapi.ImageStatusResponse, error) {
-	resp, err := f.ImageService.ImageStatus(req.Image, false)
+	resp, err := f.ImageService.ImageStatus(ctx, req.Image, false)
 	if err != nil {
 		return nil, err
 	}
@@ -48,7 +48,7 @@ func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatu
 // PullImage pulls an image with authentication config.
 func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageRequest) (*kubeapi.PullImageResponse, error) {
-	image, err := f.ImageService.PullImage(req.Image, req.Auth, req.SandboxConfig)
+	image, err := f.ImageService.PullImage(ctx, req.Image, req.Auth, req.SandboxConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -62,7 +62,7 @@ func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageReq
 // This call is idempotent, and must not return an error if the image has
 // already been removed.
 func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImageRequest) (*kubeapi.RemoveImageResponse, error) {
-	err := f.ImageService.RemoveImage(req.Image)
+	err := f.ImageService.RemoveImage(ctx, req.Image)
 	if err != nil {
 		return nil, err
 	}
@@ -72,7 +72,7 @@ func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImag
 // ImageFsInfo returns information of the filesystem that is used to store images.
 func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *kubeapi.ImageFsInfoRequest) (*kubeapi.ImageFsInfoResponse, error) {
-	fsUsage, err := f.ImageService.ImageFsInfo()
+	fsUsage, err := f.ImageService.ImageFsInfo(ctx)
	if err != nil {
 		return nil, err
 	}
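Each handler above makes the same one-line change: the ctx that the gRPC server already hands to the handler is forwarded into the backing ImageService instead of being dropped. The observable effect is that caller-side deadlines now reach the service. A self-contained sketch of that behavior (the listImages function below is illustrative, not the kubelet's):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// listImages stands in for any forwarded CRI call: it honors the ctx it
// was handed rather than one it manufactured itself.
func listImages(ctx context.Context) ([]string, error) {
	select {
	case <-time.After(100 * time.Millisecond): // simulated work
		return []string{"busybox"}, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	_, err := listImages(ctx)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}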

View File

@@ -80,13 +80,13 @@ func (f *RemoteRuntime) Stop() {
 // Version returns the runtime name, runtime version, and runtime API version.
 func (f *RemoteRuntime) Version(ctx context.Context, req *kubeapi.VersionRequest) (*kubeapi.VersionResponse, error) {
-	return f.RuntimeService.Version(req.Version)
+	return f.RuntimeService.Version(ctx, req.Version)
 }
 // RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure
 // the sandbox is in the ready state on success.
 func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSandboxRequest) (*kubeapi.RunPodSandboxResponse, error) {
-	sandboxID, err := f.RuntimeService.RunPodSandbox(req.Config, req.RuntimeHandler)
+	sandboxID, err := f.RuntimeService.RunPodSandbox(ctx, req.Config, req.RuntimeHandler)
 	if err != nil {
 		return nil, err
 	}
@@ -99,7 +99,7 @@ func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSa
 // If there are any running containers in the sandbox, they must be forcibly
 // terminated.
 func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPodSandboxRequest) (*kubeapi.StopPodSandboxResponse, error) {
-	err := f.RuntimeService.StopPodSandbox(req.PodSandboxId)
+	err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId)
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +112,7 @@ func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPod
 // This call is idempotent, and must not return an error if the sandbox has
 // already been removed.
 func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.RemovePodSandboxRequest) (*kubeapi.RemovePodSandboxResponse, error) {
-	err := f.RuntimeService.StopPodSandbox(req.PodSandboxId)
+	err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId)
 	if err != nil {
 		return nil, err
 	}
@@ -123,7 +123,7 @@ func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.Remov
 // PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not
 // present, returns an error.
 func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSandboxStatusRequest) (*kubeapi.PodSandboxStatusResponse, error) {
-	resp, err := f.RuntimeService.PodSandboxStatus(req.PodSandboxId, false)
+	resp, err := f.RuntimeService.PodSandboxStatus(ctx, req.PodSandboxId, false)
 	if err != nil {
 		return nil, err
 	}
@@ -133,7 +133,7 @@ func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSa
 // ListPodSandbox returns a list of PodSandboxes.
 func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPodSandboxRequest) (*kubeapi.ListPodSandboxResponse, error) {
-	items, err := f.RuntimeService.ListPodSandbox(req.Filter)
+	items, err := f.RuntimeService.ListPodSandbox(ctx, req.Filter)
 	if err != nil {
 		return nil, err
 	}
@@ -143,7 +143,7 @@ func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPod
 // CreateContainer creates a new container in specified PodSandbox
 func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.CreateContainerRequest) (*kubeapi.CreateContainerResponse, error) {
-	containerID, err := f.RuntimeService.CreateContainer(req.PodSandboxId, req.Config, req.SandboxConfig)
+	containerID, err := f.RuntimeService.CreateContainer(ctx, req.PodSandboxId, req.Config, req.SandboxConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -153,7 +153,7 @@ func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.Create
 // StartContainer starts the container.
 func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartContainerRequest) (*kubeapi.StartContainerResponse, error) {
-	err := f.RuntimeService.StartContainer(req.ContainerId)
+	err := f.RuntimeService.StartContainer(ctx, req.ContainerId)
 	if err != nil {
 		return nil, err
 	}
@@ -165,7 +165,7 @@ func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartCo
 // This call is idempotent, and must not return an error if the container has
 // already been stopped.
 func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopContainerRequest) (*kubeapi.StopContainerResponse, error) {
-	err := f.RuntimeService.StopContainer(req.ContainerId, req.Timeout)
+	err := f.RuntimeService.StopContainer(ctx, req.ContainerId, req.Timeout)
 	if err != nil {
 		return nil, err
 	}
@@ -178,7 +178,7 @@ func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopCont
 // This call is idempotent, and must not return an error if the container has
 // already been removed.
 func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.RemoveContainerRequest) (*kubeapi.RemoveContainerResponse, error) {
-	err := f.RuntimeService.RemoveContainer(req.ContainerId)
+	err := f.RuntimeService.RemoveContainer(ctx, req.ContainerId)
 	if err != nil {
 		return nil, err
 	}
@@ -188,7 +188,7 @@ func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.Remove
 // ListContainers lists all containers by filters.
 func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListContainersRequest) (*kubeapi.ListContainersResponse, error) {
-	items, err := f.RuntimeService.ListContainers(req.Filter)
+	items, err := f.RuntimeService.ListContainers(ctx, req.Filter)
 	if err != nil {
 		return nil, err
 	}
@@ -199,7 +199,7 @@ func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListCon
 // ContainerStatus returns status of the container. If the container is not
 // present, returns an error.
 func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.ContainerStatusRequest) (*kubeapi.ContainerStatusResponse, error) {
-	resp, err := f.RuntimeService.ContainerStatus(req.ContainerId, false)
+	resp, err := f.RuntimeService.ContainerStatus(ctx, req.ContainerId, false)
 	if err != nil {
 		return nil, err
 	}
@@ -210,7 +210,7 @@ func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.Contai
 // ExecSync runs a command in a container synchronously.
 func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncRequest) (*kubeapi.ExecSyncResponse, error) {
 	var exitCode int32
-	stdout, stderr, err := f.RuntimeService.ExecSync(req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second)
+	stdout, stderr, err := f.RuntimeService.ExecSync(ctx, req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second)
 	if err != nil {
 		exitError, ok := err.(utilexec.ExitError)
 		if !ok {
@@ -228,23 +228,23 @@ func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncReque
 // Exec prepares a streaming endpoint to execute a command in the container.
 func (f *RemoteRuntime) Exec(ctx context.Context, req *kubeapi.ExecRequest) (*kubeapi.ExecResponse, error) {
-	return f.RuntimeService.Exec(req)
+	return f.RuntimeService.Exec(ctx, req)
 }
 // Attach prepares a streaming endpoint to attach to a running container.
 func (f *RemoteRuntime) Attach(ctx context.Context, req *kubeapi.AttachRequest) (*kubeapi.AttachResponse, error) {
-	return f.RuntimeService.Attach(req)
+	return f.RuntimeService.Attach(ctx, req)
 }
 // PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
 func (f *RemoteRuntime) PortForward(ctx context.Context, req *kubeapi.PortForwardRequest) (*kubeapi.PortForwardResponse, error) {
-	return f.RuntimeService.PortForward(req)
+	return f.RuntimeService.PortForward(ctx, req)
 }
 // ContainerStats returns stats of the container. If the container does not
 // exist, the call returns an error.
 func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.ContainerStatsRequest) (*kubeapi.ContainerStatsResponse, error) {
-	stats, err := f.RuntimeService.ContainerStats(req.ContainerId)
+	stats, err := f.RuntimeService.ContainerStats(ctx, req.ContainerId)
 	if err != nil {
 		return nil, err
 	}
@@ -254,7 +254,7 @@ func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.Contain
 // ListContainerStats returns stats of all running containers.
 func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.ListContainerStatsRequest) (*kubeapi.ListContainerStatsResponse, error) {
-	stats, err := f.RuntimeService.ListContainerStats(req.Filter)
+	stats, err := f.RuntimeService.ListContainerStats(ctx, req.Filter)
 	if err != nil {
 		return nil, err
 	}
@@ -265,7 +265,7 @@ func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.Lis
 // PodSandboxStats returns stats of the pod. If the pod does not
 // exist, the call returns an error.
 func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSandboxStatsRequest) (*kubeapi.PodSandboxStatsResponse, error) {
-	stats, err := f.RuntimeService.PodSandboxStats(req.PodSandboxId)
+	stats, err := f.RuntimeService.PodSandboxStats(ctx, req.PodSandboxId)
 	if err != nil {
 		return nil, err
 	}
@@ -275,7 +275,7 @@ func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSan
 // ListPodSandboxStats returns stats of all running pods.
 func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.ListPodSandboxStatsRequest) (*kubeapi.ListPodSandboxStatsResponse, error) {
-	stats, err := f.RuntimeService.ListPodSandboxStats(req.Filter)
+	stats, err := f.RuntimeService.ListPodSandboxStats(ctx, req.Filter)
 	if err != nil {
 		return nil, err
 	}
@@ -285,7 +285,7 @@ func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.Li
 // UpdateRuntimeConfig updates the runtime configuration based on the given request.
 func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.UpdateRuntimeConfigRequest) (*kubeapi.UpdateRuntimeConfigResponse, error) {
-	err := f.RuntimeService.UpdateRuntimeConfig(req.RuntimeConfig)
+	err := f.RuntimeService.UpdateRuntimeConfig(ctx, req.RuntimeConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -295,7 +295,7 @@ func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.Up
 // Status returns the status of the runtime.
 func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest) (*kubeapi.StatusResponse, error) {
-	resp, err := f.RuntimeService.Status(false)
+	resp, err := f.RuntimeService.Status(ctx, false)
 	if err != nil {
 		return nil, err
 	}
@@ -305,7 +305,7 @@ func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest)
 // UpdateContainerResources updates ContainerConfig of the container.
 func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubeapi.UpdateContainerResourcesRequest) (*kubeapi.UpdateContainerResourcesResponse, error) {
-	err := f.RuntimeService.UpdateContainerResources(req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux})
+	err := f.RuntimeService.UpdateContainerResources(ctx, req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux})
 	if err != nil {
 		return nil, err
 	}
@@ -315,7 +315,7 @@ func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubea
 // ReopenContainerLog reopens the container log file.
 func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.ReopenContainerLogRequest) (*kubeapi.ReopenContainerLogResponse, error) {
-	err := f.RuntimeService.ReopenContainerLog(req.ContainerId)
+	err := f.RuntimeService.ReopenContainerLog(ctx, req.ContainerId)
 	if err != nil {
 		return nil, err
 	}
@@ -325,7 +325,7 @@ func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.Reo
 // CheckpointContainer checkpoints the given container.
 func (f *RemoteRuntime) CheckpointContainer(ctx context.Context, req *kubeapi.CheckpointContainerRequest) (*kubeapi.CheckpointContainerResponse, error) {
-	err := f.RuntimeService.CheckpointContainer(&kubeapi.CheckpointContainerRequest{})
+	err := f.RuntimeService.CheckpointContainer(ctx, &kubeapi.CheckpointContainerRequest{})
 	if err != nil {
 		return nil, err
 	}
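The runtime-side fake follows the identical mechanical pattern across all of its handlers: the request-scoped ctx that gRPC already supplies is forwarded to the backing RuntimeService rather than discarded. Reduced to a toy service (all names hypothetical), the shape of the change looks like this:

package main

import (
	"context"
	"fmt"
)

type runtimeService struct{}

// Before the commit, methods like this took no ctx and created their own
// background context internally; now the caller's ctx is the first argument.
func (runtimeService) StartContainer(ctx context.Context, id string) error {
	if err := ctx.Err(); err != nil { // honors caller cancellation
		return err
	}
	fmt.Println("started", id)
	return nil
}

type server struct{ rt runtimeService }

// The handler forwards its request ctx instead of dropping it.
func (s server) StartContainer(ctx context.Context, id string) error {
	return s.rt.StartContainer(ctx, id)
}

func main() {
	s := server{}
	_ = s.StartContainer(context.Background(), "c1")
}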

View File

@@ -82,7 +82,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp
 	service := &remoteImageService{timeout: connectionTimeout}
-	if err := service.determineAPIVersion(conn, endpoint); err != nil {
+	if err := service.determineAPIVersion(ctx, conn, endpoint); err != nil {
 		return nil, err
 	}
@@ -103,8 +103,8 @@ func (r *remoteImageService) useV1API() bool {
 // being upgraded, then the container runtime must also support the initially
 // selected version or the redial is expected to fail, which requires a restart
 // of kubelet.
-func (r *remoteImageService) determineAPIVersion(conn *grpc.ClientConn, endpoint string) error {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+func (r *remoteImageService) determineAPIVersion(ctx context.Context, conn *grpc.ClientConn, endpoint string) error {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	klog.V(4).InfoS("Finding the CRI API image version")
@@ -125,8 +125,8 @@ func (r *remoteImageService) determineAPIVersion(conn *grpc.ClientConn, endpoint
 }
 // ListImages lists available images.
-func (r *remoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+func (r *remoteImageService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -160,8 +160,8 @@ func (r *remoteImageService) listImagesV1(ctx context.Context, filter *runtimeap
 }
 // ImageStatus returns the status of the image.
-func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+func (r *remoteImageService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	// TODO: for refactoring common code blocks between the cri versions into
@@ -220,8 +220,8 @@ func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeap
 }
 // PullImage pulls an image with authentication config.
-func (r *remoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
-	ctx, cancel := getContextWithCancel()
+func (r *remoteImageService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	if r.useV1API() {
@@ -272,8 +272,8 @@ func (r *remoteImageService) pullImageV1(ctx context.Context, image *runtimeapi.
 }
 // RemoveImage removes the image.
-func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) (err error) {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+func (r *remoteImageService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) (err error) {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -294,10 +294,10 @@ func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) (err error
 }
 // ImageFsInfo returns information of the filesystem that is used to store images.
-func (r *remoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
+func (r *remoteImageService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) {
 	// Do not set timeout, because `ImageFsInfo` takes time.
 	// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?
-	ctx, cancel := getContextWithCancel()
+	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	if r.useV1API() {
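The recurring substitution in this file, getContextWithTimeout(r.timeout) becoming context.WithTimeout(ctx, r.timeout), is a semantic change and not just plumbing: the per-call timeout used to count against a fresh background context, so a caller's deadline was invisible; now the derived context expires at the earlier of the caller's deadline and r.timeout. A runnable demonstration of that min-deadline behavior:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Caller-established deadline: 50ms.
	parent, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// Per-call cap of 2s layered on top, as the remote services now do.
	callCtx, callCancel := context.WithTimeout(parent, 2*time.Second)
	defer callCancel()

	deadline, _ := callCtx.Deadline()
	// The effective deadline is ~50ms out: a derived context can never
	// outlive its parent.
	fmt.Println(time.Until(deadline) < time.Second) // true
}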

View File

@@ -66,7 +66,7 @@ func TestImageServiceSpansWithTP(t *testing.T) {
 	)
 	ctx := context.Background()
 	imgSvc := createRemoteImageServiceWithTracerProvider(endpoint, tp, t)
-	imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
+	imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, "busybox", imgRef)
 	require.NoError(t, err)
@@ -93,7 +93,7 @@ func TestImageServiceSpansWithoutTP(t *testing.T) {
 	)
 	ctx := context.Background()
 	imgSvc := createRemoteImageServiceWithoutTracerProvider(endpoint, t)
-	imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
+	imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, "busybox", imgRef)
 	require.NoError(t, err)
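These tests thread a real ctx into PullImage because, once the client is constructed with a TracerProvider, spans emitted by instrumented gRPC interceptors are parented from whatever context the caller supplies. A self-contained sketch of that parenting, assuming the OpenTelemetry Go SDK is on the module path (the tracer and span names are arbitrary, and this snippet is not part of the test file):

package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	// Spans started by instrumented clients hang off whatever span is in
	// ctx; that is why the updated tests pass ctx into PullImage.
	ctx, span := tp.Tracer("cri-test").Start(context.Background(), "PullImage")
	defer span.End()
	fmt.Println(span.SpanContext().IsValid(), ctx != nil) // true true
}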

View File

@@ -108,7 +108,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t
 		logReduction: logreduction.NewLogReduction(identicalErrorDelay),
 	}
-	if err := service.determineAPIVersion(conn, endpoint); err != nil {
+	if err := service.determineAPIVersion(ctx, conn, endpoint); err != nil {
 		return nil, err
 	}
@@ -128,8 +128,8 @@ func (r *remoteRuntimeService) useV1API() bool {
 // being upgraded, then the container runtime must also support the initially
 // selected version or the redial is expected to fail, which requires a restart
 // of kubelet.
-func (r *remoteRuntimeService) determineAPIVersion(conn *grpc.ClientConn, endpoint string) error {
-	ctx, cancel := getContextWithTimeout(r.timeout)
+func (r *remoteRuntimeService) determineAPIVersion(ctx context.Context, conn *grpc.ClientConn, endpoint string) error {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	klog.V(4).InfoS("Finding the CRI API runtime version")
@@ -150,10 +150,10 @@ func (r *remoteRuntimeService) determineAPIVersion(conn *grpc.ClientConn, endpoi
 }
 // Version returns the runtime name, runtime version and runtime API version.
-func (r *remoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
+func (r *remoteRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] Version", "apiVersion", apiVersion, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -201,14 +201,14 @@ func (r *remoteRuntimeService) versionV1alpha2(ctx context.Context, apiVersion s
 // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
 // the sandbox is in ready state.
-func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
+func (r *remoteRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
 	// Use 2 times longer timeout for sandbox operation (4 mins by default)
 	// TODO: Make the pod sandbox timeout configurable.
 	timeout := r.timeout * 2
 	klog.V(10).InfoS("[RemoteRuntimeService] RunPodSandbox", "config", config, "runtimeHandler", runtimeHandler, "timeout", timeout)
-	ctx, cancel := getContextWithTimeout(timeout)
+	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 	var podSandboxID string
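RunPodSandbox keeps its doubled budget for the slow sandbox path, but layering it on the caller's ctx means a tighter caller deadline still wins. A runnable illustration (rTimeout stands in for r.timeout; the values are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	const rTimeout = 2 * time.Minute // stand-in for r.timeout
	// The sandbox path doubles the usual budget, 4 minutes by default...
	timeout := rTimeout * 2

	// ...but deriving from the caller's ctx means a tighter caller
	// deadline caps the 4-minute budget.
	caller, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	ctx, cancel2 := context.WithTimeout(caller, timeout)
	defer cancel2()

	d, _ := ctx.Deadline()
	fmt.Println(time.Until(d).Round(time.Second)) // ~30s, not 4m0s
}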
@@ -250,10 +250,10 @@ func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig
 // StopPodSandbox stops the sandbox. If there are any running containers in the
 // sandbox, they should be forced to termination.
-func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) {
+func (r *remoteRuntimeService) StopPodSandbox(ctx context.Context, podSandBoxID string) (err error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] StopPodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -277,9 +277,9 @@ func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) {
 // RemovePodSandbox removes the sandbox. If there are any containers in the
 // sandbox, they should be forcibly removed.
-func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error) {
+func (r *remoteRuntimeService) RemovePodSandbox(ctx context.Context, podSandBoxID string) (err error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] RemovePodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -302,9 +302,9 @@ func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error)
 }
 // PodSandboxStatus returns the status of the PodSandbox.
-func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
+func (r *remoteRuntimeService) PodSandboxStatus(ctx context.Context, podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus", "podSandboxID", podSandBoxID, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -357,9 +357,9 @@ func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBo
 }
 // ListPodSandbox returns a list of PodSandboxes.
-func (r *remoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
+func (r *remoteRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox", "filter", filter, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -398,9 +398,9 @@ func (r *remoteRuntimeService) listPodSandboxV1(ctx context.Context, filter *run
 }
 // CreateContainer creates a new container in the specified PodSandbox.
-func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (r *remoteRuntimeService) CreateContainer(ctx context.Context, podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -455,9 +455,9 @@ func (r *remoteRuntimeService) createContainerV1(ctx context.Context, podSandBox
 }
 // StartContainer starts the container.
-func (r *remoteRuntimeService) StartContainer(containerID string) (err error) {
+func (r *remoteRuntimeService) StartContainer(ctx context.Context, containerID string) (err error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] StartContainer", "containerID", containerID, "timeout", r.timeout)
-	ctx, cancel := getContextWithTimeout(r.timeout)
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
 	defer cancel()
 	if r.useV1API() {
@@ -480,12 +480,12 @@ func (r *remoteRuntimeService) StartContainer(containerID string) (err error) {
 }
 // StopContainer stops a running container with a grace period (i.e., timeout).
-func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) (err error) {
+func (r *remoteRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) (err error) {
 	klog.V(10).InfoS("[RemoteRuntimeService] StopContainer", "containerID", containerID, "timeout", timeout)
 	// Use timeout + default timeout (2 minutes) as timeout to leave extra time
 	// for SIGKILL container and request latency.
 	t := r.timeout + time.Duration(timeout)*time.Second
-	ctx, cancel := getContextWithTimeout(t)
+	ctx, cancel := context.WithTimeout(ctx, t)
 	defer cancel()
 	r.logReduction.ClearID(containerID)
@ -512,9 +512,9 @@ func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64)
// RemoveContainer removes the container. If the container is running, the container // RemoveContainer removes the container. If the container is running, the container
// should be forced to removal. // should be forced to removal.
func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) { func (r *remoteRuntimeService) RemoveContainer(ctx context.Context, containerID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] RemoveContainer", "containerID", containerID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] RemoveContainer", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
r.logReduction.ClearID(containerID) r.logReduction.ClearID(containerID)
@ -537,9 +537,9 @@ func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) {
} }
// ListContainers lists containers by filters. // ListContainers lists containers by filters.
func (r *remoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { func (r *remoteRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListContainers", "filter", filter, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] ListContainers", "filter", filter, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -576,9 +576,9 @@ func (r *remoteRuntimeService) listContainersV1(ctx context.Context, filter *run
} }
// ContainerStatus returns the container status. // ContainerStatus returns the container status.
func (r *remoteRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { func (r *remoteRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -641,9 +641,9 @@ func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerI
} }
// UpdateContainerResources updates a container's resource config // UpdateContainerResources updates a container's resource config
func (r *remoteRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) (err error) { func (r *remoteRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] UpdateContainerResources", "containerID", containerID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] UpdateContainerResources", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -670,17 +670,16 @@ func (r *remoteRuntimeService) UpdateContainerResources(containerID string, reso
// ExecSync executes a command in the container, and returns the stdout output. // ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned. // If command exits with a non-zero exit code, an error is returned.
func (r *remoteRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { func (r *remoteRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
klog.V(10).InfoS("[RemoteRuntimeService] ExecSync", "containerID", containerID, "timeout", timeout) klog.V(10).InfoS("[RemoteRuntimeService] ExecSync", "containerID", containerID, "timeout", timeout)
// Do not set timeout when timeout is 0. // Do not set timeout when timeout is 0.
var ctx context.Context
var cancel context.CancelFunc var cancel context.CancelFunc
if timeout != 0 { if timeout != 0 {
// Use timeout + default timeout (2 minutes) as timeout to leave some time for // Use timeout + default timeout (2 minutes) as timeout to leave some time for
// the runtime to do cleanup. // the runtime to do cleanup.
ctx, cancel = getContextWithTimeout(r.timeout + timeout) ctx, cancel = context.WithTimeout(ctx, r.timeout+timeout)
} else { } else {
ctx, cancel = getContextWithCancel() ctx, cancel = context.WithCancel(ctx)
} }
defer cancel() defer cancel()
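ExecSync keeps its special case: a zero timeout means "no deadline", so the derived context is a plain WithCancel rather than WithTimeout. A self-contained sketch of the same branching; execContext is a hypothetical helper name, not kubelet code:

package main

import (
    "context"
    "fmt"
    "time"
)

// execContext mirrors the branch above: with a non-zero timeout the
// deadline is base+timeout (cleanup headroom for the runtime); with a
// zero timeout only the caller's cancellation applies.
func execContext(parent context.Context, base, timeout time.Duration) (context.Context, context.CancelFunc) {
    if timeout != 0 {
        return context.WithTimeout(parent, base+timeout)
    }
    return context.WithCancel(parent)
}

func main() {
    ctx, cancel := execContext(context.Background(), 2*time.Minute, 10*time.Second)
    defer cancel()
    _, hasDeadline := ctx.Deadline()
    fmt.Println(hasDeadline) // true: the deadline is ~2m10s from now
}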
@ -754,9 +753,9 @@ func (r *remoteRuntimeService) execSyncV1(ctx context.Context, containerID strin
} }
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address. // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (r *remoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { func (r *remoteRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Exec", "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] Exec", "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -803,9 +802,9 @@ func (r *remoteRuntimeService) execV1(ctx context.Context, req *runtimeapi.ExecR
} }
// Attach prepares a streaming endpoint to attach to a running container, and returns the address. // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (r *remoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { func (r *remoteRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Attach", "containerID", req.ContainerId, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] Attach", "containerID", req.ContainerId, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -850,9 +849,9 @@ func (r *remoteRuntimeService) attachV1(ctx context.Context, req *runtimeapi.Att
} }
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (r *remoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { func (r *remoteRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PortForward", "podSandboxID", req.PodSandboxId, "port", req.Port, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] PortForward", "podSandboxID", req.PodSandboxId, "port", req.Port, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -901,9 +900,9 @@ func (r *remoteRuntimeService) portForwardV1(ctx context.Context, req *runtimeap
// UpdateRuntimeConfig updates the config of a runtime service. The only // UpdateRuntimeConfig updates the config of a runtime service. The only
// update payload currently supported is the pod CIDR assigned to a node, // update payload currently supported is the pod CIDR assigned to a node,
// and the runtime service just proxies it down to the network plugin. // and the runtime service just proxies it down to the network plugin.
func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) { func (r *remoteRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] UpdateRuntimeConfig", "runtimeConfig", runtimeConfig, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] UpdateRuntimeConfig", "runtimeConfig", runtimeConfig, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
// Response doesn't contain anything of interest. This translates to an // Response doesn't contain anything of interest. This translates to an
@ -928,9 +927,9 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.Run
} }
// Status returns the status of the runtime. // Status returns the status of the runtime.
func (r *remoteRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) { func (r *remoteRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -983,9 +982,9 @@ func (r *remoteRuntimeService) statusV1(ctx context.Context, verbose bool) (*run
} }
// ContainerStats returns the stats of the container. // ContainerStats returns the stats of the container.
func (r *remoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { func (r *remoteRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats", "containerID", containerID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -1028,11 +1027,11 @@ func (r *remoteRuntimeService) containerStatsV1(ctx context.Context, containerID
} }
// ListContainerStats returns the list of ContainerStats given the filter. // ListContainerStats returns the list of ContainerStats given the filter.
func (r *remoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter) klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter)
// Do not set timeout, because writable layer stats collection takes time. // Do not set timeout, because writable layer stats collection takes time.
// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?
ctx, cancel := getContextWithCancel() ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -1069,9 +1068,9 @@ func (r *remoteRuntimeService) listContainerStatsV1(ctx context.Context, filter
} }
// PodSandboxStats returns the stats of the pod. // PodSandboxStats returns the stats of the pod.
func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { func (r *remoteRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStats", "podSandboxID", podSandboxID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStats", "podSandboxID", podSandboxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -1114,10 +1113,10 @@ func (r *remoteRuntimeService) podSandboxStatsV1(ctx context.Context, podSandbox
} }
// ListPodSandboxStats returns the list of pod sandbox stats given the filter // ListPodSandboxStats returns the list of pod sandbox stats given the filter
func (r *remoteRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { func (r *remoteRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats", "filter", filter) klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats", "filter", filter)
// Set timeout, because runtimes are able to cache disk stats results // Set timeout, because runtimes are able to cache disk stats results
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -1154,9 +1153,9 @@ func (r *remoteRuntimeService) listPodSandboxStatsV1(ctx context.Context, filter
} }
// ReopenContainerLog reopens the container log file. // ReopenContainerLog reopens the container log file.
func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error) { func (r *remoteRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] ReopenContainerLog", "containerID", containerID, "timeout", r.timeout) klog.V(10).InfoS("[RemoteRuntimeService] ReopenContainerLog", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel() defer cancel()
if r.useV1API() { if r.useV1API() {
@ -1174,7 +1173,7 @@ func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error
} }
// CheckpointContainer triggers a checkpoint of the given CheckpointContainerRequest // CheckpointContainer triggers a checkpoint of the given CheckpointContainerRequest
func (r *remoteRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { func (r *remoteRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
klog.V(10).InfoS( klog.V(10).InfoS(
"[RemoteRuntimeService] CheckpointContainer", "[RemoteRuntimeService] CheckpointContainer",
"options", "options",
@ -1191,18 +1190,18 @@ func (r *remoteRuntimeService) CheckpointContainer(options *runtimeapi.Checkpoin
return errors.New("CheckpointContainer requires the timeout value to be > 0") return errors.New("CheckpointContainer requires the timeout value to be > 0")
} }
ctx, cancel := func() (context.Context, context.CancelFunc) { ctx, cancel := func(ctx context.Context) (context.Context, context.CancelFunc) {
defaultTimeout := int64(r.timeout / time.Second) defaultTimeout := int64(r.timeout / time.Second)
if options.Timeout > defaultTimeout { if options.Timeout > defaultTimeout {
// The user requested a specific timeout, let's use that if it // The user requested a specific timeout, let's use that if it
// is larger than the CRI default. // is larger than the CRI default.
return getContextWithTimeout(time.Duration(options.Timeout) * time.Second) return context.WithTimeout(ctx, time.Duration(options.Timeout)*time.Second)
} }
// If the user requested a timeout less than the // If the user requested a timeout less than the
// CRI default, let's use the CRI default. // CRI default, let's use the CRI default.
options.Timeout = defaultTimeout options.Timeout = defaultTimeout
return getContextWithTimeout(r.timeout) return context.WithTimeout(ctx, r.timeout)
}() }(ctx)
defer cancel() defer cancel()
_, err := r.runtimeClient.CheckpointContainer( _, err := r.runtimeClient.CheckpointContainer(
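The CheckpointContainer closure now takes the caller's ctx explicitly and picks whichever timeout is larger, the CRI default or the user-requested one; the real code also writes the default back into options.Timeout when the request is smaller, which this sketch omits. checkpointContext is a hypothetical name:

package main

import (
    "context"
    "fmt"
    "time"
)

// checkpointContext picks the larger of the CRI default timeout and
// the user-requested timeout (in seconds), as the closure above does.
func checkpointContext(parent context.Context, criDefault time.Duration, requestedSeconds int64) (context.Context, context.CancelFunc) {
    if requested := time.Duration(requestedSeconds) * time.Second; requested > criDefault {
        return context.WithTimeout(parent, requested)
    }
    return context.WithTimeout(parent, criDefault)
}

func main() {
    ctx, cancel := checkpointContext(context.Background(), 2*time.Minute, 30)
    defer cancel()
    d, _ := ctx.Deadline()
    fmt.Println(time.Until(d).Round(time.Minute)) // 2m0s: the default wins
}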

View File

@ -87,7 +87,7 @@ func TestGetSpans(t *testing.T) {
) )
ctx := context.Background() ctx := context.Background()
rtSvc := createRemoteRuntimeServiceWithTracerProvider(endpoint, tp, t) rtSvc := createRemoteRuntimeServiceWithTracerProvider(endpoint, tp, t)
_, err := rtSvc.Version(apitest.FakeVersion) _, err := rtSvc.Version(ctx, apitest.FakeVersion)
require.NoError(t, err) require.NoError(t, err)
err = tp.ForceFlush(ctx) err = tp.ForceFlush(ctx)
require.NoError(t, err) require.NoError(t, err)
@ -106,8 +106,9 @@ func TestVersion(t *testing.T) {
} }
}() }()
ctx := context.Background()
rtSvc := createRemoteRuntimeService(endpoint, t) rtSvc := createRemoteRuntimeService(endpoint, t)
version, err := rtSvc.Version(apitest.FakeVersion) version, err := rtSvc.Version(ctx, apitest.FakeVersion)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, apitest.FakeVersion, version.Version) assert.Equal(t, apitest.FakeVersion, version.Version)
assert.Equal(t, apitest.FakeRuntimeName, version.RuntimeName) assert.Equal(t, apitest.FakeRuntimeName, version.RuntimeName)

View File

@ -17,9 +17,7 @@ limitations under the License.
package remote package remote
import ( import (
"context"
"fmt" "fmt"
"time"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
) )
@ -28,16 +26,6 @@ import (
// grpc library default is 4MB // grpc library default is 4MB
const maxMsgSize = 1024 * 1024 * 16 const maxMsgSize = 1024 * 1024 * 16
// getContextWithTimeout returns a context with timeout.
func getContextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), timeout)
}
// getContextWithCancel returns a context with cancel.
func getContextWithCancel() (context.Context, context.CancelFunc) {
return context.WithCancel(context.Background())
}
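With these helpers gone, every context is derived from a caller-supplied parent instead of context.Background(). The practical difference is cancellation propagation, which this small self-contained demo illustrates:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    parent, cancelParent := context.WithCancel(context.Background())

    // Old helper style: rooted at Background, deaf to the caller.
    detached, cancelDetached := context.WithTimeout(context.Background(), time.Minute)
    defer cancelDetached()

    // New style: derived from the caller, so cancellation propagates.
    derived, cancelDerived := context.WithTimeout(parent, time.Minute)
    defer cancelDerived()

    cancelParent()
    fmt.Println(detached.Err()) // <nil>: still running
    fmt.Println(derived.Err())  // context.Canceled
}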
// verifySandboxStatus verifies whether all required fields are set in PodSandboxStatus. // verifySandboxStatus verifies whether all required fields are set in PodSandboxStatus.
func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error { func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error {
if status.Id == "" { if status.Id == "" {

View File

@ -17,6 +17,7 @@ limitations under the License.
package portforward package portforward
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
@ -240,6 +241,7 @@ Loop:
// portForward invokes the httpStreamHandler's forwarder.PortForward // portForward invokes the httpStreamHandler's forwarder.PortForward
// function for the given stream pair. // function for the given stream pair.
func (h *httpStreamHandler) portForward(p *httpStreamPair) { func (h *httpStreamHandler) portForward(p *httpStreamPair) {
ctx := context.Background()
defer p.dataStream.Close() defer p.dataStream.Close()
defer p.errorStream.Close() defer p.errorStream.Close()
@ -247,7 +249,7 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) {
port, _ := strconv.ParseInt(portString, 10, 32) port, _ := strconv.ParseInt(portString, 10, 32)
klog.V(5).InfoS("Connection request invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString) klog.V(5).InfoS("Connection request invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString)
err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) err := h.forwarder.PortForward(ctx, h.pod, h.uid, int32(port), p.dataStream)
klog.V(5).InfoS("Connection request done invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString) klog.V(5).InfoS("Connection request done invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString)
if err != nil { if err != nil {

View File

@ -17,6 +17,7 @@ limitations under the License.
package portforward package portforward
import ( import (
"context"
"io" "io"
"net/http" "net/http"
"time" "time"
@ -30,7 +31,7 @@ import (
// in a pod. // in a pod.
type PortForwarder interface { type PortForwarder interface {
// PortForwarder copies data between a data stream and a port in a pod. // PortForwarder copies data between a data stream and a port in a pod.
PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error PortForward(ctx context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error
} }
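A minimal sketch of an implementation under the new signature; the type, its behavior, and the io.Discard sink are illustrative, not part of the kubelet:

import (
    "context"
    "io"

    "k8s.io/apimachinery/pkg/types"
)

type discardForwarder struct{}

func (f *discardForwarder) PortForward(ctx context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
    // Fail fast if the request that opened the stream is already gone.
    if err := ctx.Err(); err != nil {
        return err
    }
    defer stream.Close()
    _, err := io.Copy(io.Discard, stream) // drain the client's data
    return err
}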
// ServePortForward handles a port forwarding request. A single request is // ServePortForward handles a port forwarding request. A single request is

View File

@ -17,6 +17,7 @@ limitations under the License.
package portforward package portforward
import ( import (
"context"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io" "io"
@ -182,11 +183,12 @@ func (h *websocketStreamHandler) run() {
} }
func (h *websocketStreamHandler) portForward(p *websocketStreamPair) { func (h *websocketStreamHandler) portForward(p *websocketStreamPair) {
ctx := context.Background()
defer p.dataStream.Close() defer p.dataStream.Close()
defer p.errorStream.Close() defer p.errorStream.Close()
klog.V(5).InfoS("Connection invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port) klog.V(5).InfoS("Connection invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port)
err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream) err := h.forwarder.PortForward(ctx, h.pod, h.uid, p.port, p.dataStream)
klog.V(5).InfoS("Connection done invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port) klog.V(5).InfoS("Connection done invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port)
if err != nil { if err != nil {

View File

@ -17,6 +17,7 @@ limitations under the License.
package remotecommand package remotecommand
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -33,7 +34,7 @@ import (
type Attacher interface { type Attacher interface {
// AttachContainer attaches to the running container in the pod, copying data between in/out/err // AttachContainer attaches to the running container in the pod, copying data between in/out/err
// and the container's stdin/stdout/stderr. // and the container's stdin/stdout/stderr.
AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error AttachContainer(ctx context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
} }
// ServeAttach handles requests to attach to a container. After creating/receiving the required // ServeAttach handles requests to attach to a container. After creating/receiving the required
@ -46,7 +47,7 @@ func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, po
} }
defer ctx.conn.Close() defer ctx.conn.Close()
err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan) err := attacher.AttachContainer(req.Context(), podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan)
if err != nil { if err != nil {
err = fmt.Errorf("error attaching to container: %v", err) err = fmt.Errorf("error attaching to container: %v", err)
runtime.HandleError(err) runtime.HandleError(err)

View File

@ -17,6 +17,7 @@ limitations under the License.
package remotecommand package remotecommand
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -35,7 +36,7 @@ import (
type Executor interface { type Executor interface {
// ExecInContainer executes a command in a container in the pod, copying data // ExecInContainer executes a command in a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr. // between in/out/err and the container's stdin/stdout/stderr.
ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error ExecInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
} }
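Implementations can use the new ctx to abandon an exec when the client goes away. A hedged sketch, where sketchExecutor and runCommand are hypothetical names and not kubelet code:

func (e *sketchExecutor) ExecInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
    done := make(chan error, 1)
    go func() { done <- runCommand(ctx, cmd, in, out, errw) }() // runCommand is hypothetical
    select {
    case err := <-done:
        return err
    case <-ctx.Done():
        return ctx.Err() // client disconnected or request cancelled
    }
}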
// ServeExec handles requests to execute a command in a container. After // ServeExec handles requests to execute a command in a container. After
@ -49,7 +50,7 @@ func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podN
} }
defer ctx.conn.Close() defer ctx.conn.Close()
err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0) err := executor.ExecInContainer(req.Context(), podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0)
if err != nil { if err != nil {
if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() { if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() {
rc := exitErr.ExitStatus() rc := exitErr.ExitStatus()

View File

@ -70,9 +70,9 @@ func NewOptions(req *http.Request) (*Options, error) {
}, nil }, nil
} }
// context contains the connection and streams used when // connectionContext contains the connection and streams used when
// forwarding an attach or execute session into a container. // forwarding an attach or execute session into a container.
type context struct { type connectionContext struct {
conn io.Closer conn io.Closer
stdinStream io.ReadCloser stdinStream io.ReadCloser
stdoutStream io.WriteCloser stdoutStream io.WriteCloser
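The rename is forced by the new import: a Go file cannot both import "context" and declare a package-level identifier named context, since the spec forbids declaring the same identifier in both the file and package blocks. A minimal illustration:

package remotecommand

import "context"

// type context struct{} // would not compile: "context redeclared as
//                       // imported package name"

type connectionContext struct{} // renamed, so the import stays usable

var _ context.Context // the standard package is now unambiguous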
@ -102,8 +102,8 @@ func waitStreamReply(replySent <-chan struct{}, notify chan<- struct{}, stop <-c
} }
} }
func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) {
var ctx *context var ctx *connectionContext
var ok bool var ok bool
if wsstream.IsWebSocketRequest(req) { if wsstream.IsWebSocketRequest(req) {
ctx, ok = createWebSocketStreams(req, w, opts, idleTimeout) ctx, ok = createWebSocketStreams(req, w, opts, idleTimeout)
@ -122,7 +122,7 @@ func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supp
return ctx, true return ctx, true
} }
func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) {
protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols) protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
@ -194,7 +194,7 @@ func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt
type protocolHandler interface { type protocolHandler interface {
// waitForStreams waits for the expected streams or a timeout, returning a // waitForStreams waits for the expected streams or a timeout, returning a
// remoteCommandContext if all the streams were received, or an error if not. // remoteCommandContext if all the streams were received, or an error if not.
waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error)
// supportsTerminalResizing returns true if the protocol handler supports terminal resizing // supportsTerminalResizing returns true if the protocol handler supports terminal resizing
supportsTerminalResizing() bool supportsTerminalResizing() bool
} }
@ -204,8 +204,8 @@ type protocolHandler interface {
// the process' exit code. // the process' exit code.
type v4ProtocolHandler struct{} type v4ProtocolHandler struct{}
func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
ctx := &context{} ctx := &connectionContext{}
receivedStreams := 0 receivedStreams := 0
replyChan := make(chan struct{}) replyChan := make(chan struct{})
stop := make(chan struct{}) stop := make(chan struct{})
@ -255,8 +255,8 @@ func (*v4ProtocolHandler) supportsTerminalResizing() bool { return true }
// v3ProtocolHandler implements the V3 protocol version for streaming command execution. // v3ProtocolHandler implements the V3 protocol version for streaming command execution.
type v3ProtocolHandler struct{} type v3ProtocolHandler struct{}
func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
ctx := &context{} ctx := &connectionContext{}
receivedStreams := 0 receivedStreams := 0
replyChan := make(chan struct{}) replyChan := make(chan struct{})
stop := make(chan struct{}) stop := make(chan struct{})
@ -306,8 +306,8 @@ func (*v3ProtocolHandler) supportsTerminalResizing() bool { return true }
// v2ProtocolHandler implements the V2 protocol version for streaming command execution. // v2ProtocolHandler implements the V2 protocol version for streaming command execution.
type v2ProtocolHandler struct{} type v2ProtocolHandler struct{}
func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
ctx := &context{} ctx := &connectionContext{}
receivedStreams := 0 receivedStreams := 0
replyChan := make(chan struct{}) replyChan := make(chan struct{})
stop := make(chan struct{}) stop := make(chan struct{})
@ -354,8 +354,8 @@ func (*v2ProtocolHandler) supportsTerminalResizing() bool { return false }
// v1ProtocolHandler implements the V1 protocol version for streaming command execution. // v1ProtocolHandler implements the V1 protocol version for streaming command execution.
type v1ProtocolHandler struct{} type v1ProtocolHandler struct{}
func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
ctx := &context{} ctx := &connectionContext{}
receivedStreams := 0 receivedStreams := 0
replyChan := make(chan struct{}) replyChan := make(chan struct{})
stop := make(chan struct{}) stop := make(chan struct{})

View File

@ -68,9 +68,9 @@ func writeChannel(real bool) wsstream.ChannelType {
return wsstream.IgnoreChannel return wsstream.IgnoreChannel
} }
// createWebSocketStreams returns a context containing the websocket connection and // createWebSocketStreams returns a connectionContext containing the websocket connection and
// streams needed to perform an exec or an attach. // streams needed to perform an exec or an attach.
func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*context, bool) { func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*connectionContext, bool) {
channels := createChannels(opts) channels := createChannels(opts)
conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{ conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{
"": { "": {
@ -112,7 +112,7 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Opti
streams[errorChannel].Write([]byte{}) streams[errorChannel].Write([]byte{})
} }
ctx := &context{ ctx := &connectionContext{
conn: conn, conn: conn,
stdinStream: streams[stdinChannel], stdinStream: streams[stdinChannel],
stdoutStream: streams[stdoutChannel], stdoutStream: streams[stdoutChannel],

View File

@ -17,6 +17,7 @@ limitations under the License.
package streaming package streaming
import ( import (
"context"
"crypto/tls" "crypto/tls"
"errors" "errors"
"io" "io"
@ -61,9 +62,9 @@ type Server interface {
// Runtime is the interface to execute the commands and provide the streams. // Runtime is the interface to execute the commands and provide the streams.
type Runtime interface { type Runtime interface {
Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error Exec(ctx context.Context, containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error Attach(ctx context.Context, containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error PortForward(ctx context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error
} }
// Config defines the options used for running the stream server. // Config defines the options used for running the stream server.
@ -369,14 +370,14 @@ var _ remotecommandserver.Executor = &criAdapter{}
var _ remotecommandserver.Attacher = &criAdapter{} var _ remotecommandserver.Attacher = &criAdapter{}
var _ portforward.PortForwarder = &criAdapter{} var _ portforward.PortForwarder = &criAdapter{}
func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { func (a *criAdapter) ExecInContainer(ctx context.Context, podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
return a.Runtime.Exec(container, cmd, in, out, err, tty, resize) return a.Runtime.Exec(ctx, container, cmd, in, out, err, tty, resize)
} }
func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (a *criAdapter) AttachContainer(ctx context.Context, podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return a.Runtime.Attach(container, in, out, err, tty, resize) return a.Runtime.Attach(ctx, container, in, out, err, tty, resize)
} }
func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { func (a *criAdapter) PortForward(ctx context.Context, podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error {
return a.Runtime.PortForward(podName, port, stream) return a.Runtime.PortForward(ctx, podName, port, stream)
} }

View File

@ -17,6 +17,7 @@ limitations under the License.
package streaming package streaming
import ( import (
"context"
"crypto/tls" "crypto/tls"
"io" "io"
"net/http" "net/http"
@ -413,19 +414,19 @@ type fakeRuntime struct {
t *testing.T t *testing.T
} }
func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
assert.Equal(f.t, testContainerID, containerID) assert.Equal(f.t, testContainerID, containerID)
doServerStreams(f.t, "exec", stdin, stdout, stderr) doServerStreams(f.t, "exec", stdin, stdout, stderr)
return nil return nil
} }
func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
assert.Equal(f.t, testContainerID, containerID) assert.Equal(f.t, testContainerID, containerID)
doServerStreams(f.t, "attach", stdin, stdout, stderr) doServerStreams(f.t, "attach", stdin, stdout, stderr)
return nil return nil
} }
func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
assert.Equal(f.t, testPodSandboxID, podSandboxID) assert.Equal(f.t, testPodSandboxID, podSandboxID)
assert.EqualValues(f.t, testPort, port) assert.EqualValues(f.t, testPort, port)
doServerStreams(f.t, "portforward", stream, stream, nil) doServerStreams(f.t, "portforward", stream, stream, nil)

View File

@ -17,6 +17,7 @@ limitations under the License.
package eviction package eviction
import ( import (
"context"
"fmt" "fmt"
"sort" "sort"
"sync" "sync"
@ -230,6 +231,7 @@ func (m *managerImpl) IsUnderPIDPressure() bool {
// synchronize is the main control loop that enforces eviction thresholds. // synchronize is the main control loop that enforces eviction thresholds.
// Returns the pod that was killed, or nil if no pod was killed. // Returns the pod that was killed, or nil if no pod was killed.
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod { func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod {
ctx := context.Background()
// if we have nothing to do, just return // if we have nothing to do, just return
thresholds := m.config.Thresholds thresholds := m.config.Thresholds
if len(thresholds) == 0 && !m.localStorageCapacityIsolation { if len(thresholds) == 0 && !m.localStorageCapacityIsolation {
@ -240,7 +242,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
// build the ranking functions (if not yet known) // build the ranking functions (if not yet known)
// TODO: have a function in cadvisor that lets us know if global housekeeping has completed // TODO: have a function in cadvisor that lets us know if global housekeeping has completed
if m.dedicatedImageFs == nil { if m.dedicatedImageFs == nil {
hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs() hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs(ctx)
if ok != nil { if ok != nil {
return nil return nil
} }
@ -251,7 +253,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
activePods := podFunc() activePods := podFunc()
updateStats := true updateStats := true
summary, err := m.summaryProvider.Get(updateStats) summary, err := m.summaryProvider.Get(ctx, updateStats)
if err != nil { if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats") klog.ErrorS(err, "Eviction manager: failed to get summary stats")
return nil return nil
@ -343,7 +345,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
// check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods. // check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods.
if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) { if m.reclaimNodeLevelResources(ctx, thresholdToReclaim.Signal, resourceToReclaim) {
klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim) klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim)
return nil return nil
} }
@ -418,17 +420,17 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods
} }
// reclaimNodeLevelResources attempts to reclaim node-level resources. Returns true if thresholds were satisfied and no pod eviction is required. // reclaimNodeLevelResources attempts to reclaim node-level resources. Returns true if thresholds were satisfied and no pod eviction is required.
func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool { func (m *managerImpl) reclaimNodeLevelResources(ctx context.Context, signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool {
nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim] nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim]
for _, nodeReclaimFunc := range nodeReclaimFuncs { for _, nodeReclaimFunc := range nodeReclaimFuncs {
// attempt to reclaim the pressured resource. // attempt to reclaim the pressured resource.
if err := nodeReclaimFunc(); err != nil { if err := nodeReclaimFunc(ctx); err != nil {
klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err) klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err)
} }
} }
if len(nodeReclaimFuncs) > 0 { if len(nodeReclaimFuncs) > 0 {
summary, err := m.summaryProvider.Get(true) summary, err := m.summaryProvider.Get(ctx, true)
if err != nil { if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim") klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim")
return false return false
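One synchronize pass now owns a single context that flows through stats collection and every reclaim attempt, so a future switch to a cancellable root would propagate everywhere at once. A condensed sketch of that flow, reusing the names above:

ctx := context.Background() // the eviction loop has no caller to inherit from
if m.reclaimNodeLevelResources(ctx, thresholdToReclaim.Signal, resourceToReclaim) {
    return nil // pressure relieved without evicting pods
}
summary, err := m.summaryProvider.Get(ctx, true) // same ctx for the recheck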

View File

@ -17,6 +17,7 @@ limitations under the License.
package eviction package eviction
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
@ -67,7 +68,7 @@ type mockDiskInfoProvider struct {
} }
// HasDedicatedImageFs returns the mocked value // HasDedicatedImageFs returns the mocked value
func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) { func (m *mockDiskInfoProvider) HasDedicatedImageFs(_ context.Context) (bool, error) {
return m.dedicatedImageFs, nil return m.dedicatedImageFs, nil
} }
@ -81,7 +82,7 @@ type mockDiskGC struct {
} }
// DeleteUnusedImages returns the mocked values. // DeleteUnusedImages returns the mocked values.
func (m *mockDiskGC) DeleteUnusedImages() error { func (m *mockDiskGC) DeleteUnusedImages(_ context.Context) error {
m.imageGCInvoked = true m.imageGCInvoked = true
if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil { if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil {
m.fakeSummaryProvider.result = m.summaryAfterGC m.fakeSummaryProvider.result = m.summaryAfterGC
@ -90,7 +91,7 @@ func (m *mockDiskGC) DeleteUnusedImages() error {
} }
// DeleteAllUnusedContainers returns the mocked value // DeleteAllUnusedContainers returns the mocked value
func (m *mockDiskGC) DeleteAllUnusedContainers() error { func (m *mockDiskGC) DeleteAllUnusedContainers(_ context.Context) error {
m.containerGCInvoked = true m.containerGCInvoked = true
if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil { if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil {
m.fakeSummaryProvider.result = m.summaryAfterGC m.fakeSummaryProvider.result = m.summaryAfterGC

View File

@ -17,6 +17,7 @@ limitations under the License.
package eviction package eviction
import ( import (
"context"
"fmt" "fmt"
"reflect" "reflect"
"sort" "sort"
@ -1185,11 +1186,11 @@ type fakeSummaryProvider struct {
result *statsapi.Summary result *statsapi.Summary
} }
func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) { func (f *fakeSummaryProvider) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
return f.result, nil return f.result, nil
} }
func (f *fakeSummaryProvider) GetCPUAndMemoryStats() (*statsapi.Summary, error) { func (f *fakeSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
return f.result, nil return f.result, nil
} }

View File

@ -21,6 +21,7 @@ limitations under the License.
package eviction package eviction
import ( import (
context "context"
reflect "reflect" reflect "reflect"
time "time" time "time"
@ -129,18 +130,18 @@ func (m *MockDiskInfoProvider) EXPECT() *MockDiskInfoProviderMockRecorder {
} }
// HasDedicatedImageFs mocks base method. // HasDedicatedImageFs mocks base method.
func (m *MockDiskInfoProvider) HasDedicatedImageFs() (bool, error) { func (m *MockDiskInfoProvider) HasDedicatedImageFs(ctx context.Context) (bool, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasDedicatedImageFs") ret := m.ctrl.Call(m, "HasDedicatedImageFs", ctx)
ret0, _ := ret[0].(bool) ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
// HasDedicatedImageFs indicates an expected call of HasDedicatedImageFs. // HasDedicatedImageFs indicates an expected call of HasDedicatedImageFs.
func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs() *gomock.Call { func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs), ctx)
} }
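Because the mocked method now takes ctx, recorded expectations must match it too; gomock.Any() is the usual matcher. A hedged test sketch, assuming the generated NewMockDiskInfoProvider constructor:

mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()

provider := NewMockDiskInfoProvider(mockCtrl)
provider.EXPECT().HasDedicatedImageFs(gomock.Any()).Return(true, nil)

// The call site supplies a real context; the matcher accepts any value.
hasFs, err := provider.HasDedicatedImageFs(context.Background())
// hasFs == true, err == nil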
// MockImageGC is a mock of ImageGC interface. // MockImageGC is a mock of ImageGC interface.
@ -167,17 +168,17 @@ func (m *MockImageGC) EXPECT() *MockImageGCMockRecorder {
} }
// DeleteUnusedImages mocks base method. // DeleteUnusedImages mocks base method.
func (m *MockImageGC) DeleteUnusedImages() error { func (m *MockImageGC) DeleteUnusedImages(ctx context.Context) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteUnusedImages") ret := m.ctrl.Call(m, "DeleteUnusedImages", ctx)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
return ret0 return ret0
} }
// DeleteUnusedImages indicates an expected call of DeleteUnusedImages. // DeleteUnusedImages indicates an expected call of DeleteUnusedImages.
func (mr *MockImageGCMockRecorder) DeleteUnusedImages() *gomock.Call { func (mr *MockImageGCMockRecorder) DeleteUnusedImages(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages), ctx)
} }
// MockContainerGC is a mock of ContainerGC interface. // MockContainerGC is a mock of ContainerGC interface.
@ -204,17 +205,17 @@ func (m *MockContainerGC) EXPECT() *MockContainerGCMockRecorder {
} }
// DeleteAllUnusedContainers mocks base method. // DeleteAllUnusedContainers mocks base method.
func (m *MockContainerGC) DeleteAllUnusedContainers() error { func (m *MockContainerGC) DeleteAllUnusedContainers(ctx context.Context) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteAllUnusedContainers") ret := m.ctrl.Call(m, "DeleteAllUnusedContainers", ctx)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
return ret0 return ret0
} }
// DeleteAllUnusedContainers indicates an expected call of DeleteAllUnusedContainers. // DeleteAllUnusedContainers indicates an expected call of DeleteAllUnusedContainers.
func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers() *gomock.Call { func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers), ctx)
} }
// MockCgroupNotifier is a mock of CgroupNotifier interface. // MockCgroupNotifier is a mock of CgroupNotifier interface.

View File

@ -18,6 +18,7 @@ limitations under the License.
package eviction package eviction
import ( import (
"context"
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -71,19 +72,19 @@ type Manager interface {
// DiskInfoProvider is responsible for informing the manager how disk is configured. // DiskInfoProvider is responsible for informing the manager how disk is configured.
type DiskInfoProvider interface { type DiskInfoProvider interface {
// HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs. // HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs.
HasDedicatedImageFs() (bool, error) HasDedicatedImageFs(ctx context.Context) (bool, error)
} }
// ImageGC is responsible for performing garbage collection of unused images. // ImageGC is responsible for performing garbage collection of unused images.
type ImageGC interface { type ImageGC interface {
// DeleteUnusedImages deletes unused images. // DeleteUnusedImages deletes unused images.
DeleteUnusedImages() error DeleteUnusedImages(ctx context.Context) error
} }
// ContainerGC is responsible for performing garbage collection of unused containers. // ContainerGC is responsible for performing garbage collection of unused containers.
type ContainerGC interface { type ContainerGC interface {
// DeleteAllUnusedContainers deletes all unused containers, even those that belong to pods that are terminated, but not deleted. // DeleteAllUnusedContainers deletes all unused containers, even those that belong to pods that are terminated, but not deleted.
DeleteAllUnusedContainers() error DeleteAllUnusedContainers(ctx context.Context) error
} }
// KillPodFunc kills a pod. // KillPodFunc kills a pod.
@ -131,7 +132,7 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time
type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time
// nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods. // nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods.
type nodeReclaimFunc func() error type nodeReclaimFunc func(ctx context.Context) error
// nodeReclaimFuncs is an ordered list of nodeReclaimFunc // nodeReclaimFuncs is an ordered list of nodeReclaimFunc
type nodeReclaimFuncs []nodeReclaimFunc type nodeReclaimFuncs []nodeReclaimFunc
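A side effect of the aligned signatures: the context-aware GC methods above satisfy nodeReclaimFunc directly as method values, with no wrapper closure. A sketch, assuming implementations are injected elsewhere:

var imageGC ImageGC         // injected elsewhere
var containerGC ContainerGC // injected elsewhere

reclaimFuncs := nodeReclaimFuncs{
    nodeReclaimFunc(containerGC.DeleteAllUnusedContainers),
    nodeReclaimFunc(imageGC.DeleteUnusedImages),
}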

View File

@ -17,9 +17,10 @@ limitations under the License.
package images package images
import ( import (
"context"
"fmt" "fmt"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -43,9 +44,9 @@ type throttledImageService struct {
limiter flowcontrol.RateLimiter limiter flowcontrol.RateLimiter
} }
func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { func (ts throttledImageService) PullImage(ctx context.Context, image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
if ts.limiter.TryAccept() { if ts.limiter.TryAccept() {
return ts.ImageService.PullImage(image, secrets, podSandboxConfig) return ts.ImageService.PullImage(ctx, image, secrets, podSandboxConfig)
} }
return "", fmt.Errorf("pull QPS exceeded") return "", fmt.Errorf("pull QPS exceeded")
} }
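Callers of the throttled wrapper now control the pull's lifetime through ctx while the rate limiter still gates admission. A hedged usage sketch; the image name and timeout are illustrative:

ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()

imageRef, err := imageService.PullImage(ctx, kubecontainer.ImageSpec{Image: "registry.k8s.io/pause:3.8"}, nil, nil)
if err != nil {
    // Either the limiter rejected the pull ("pull QPS exceeded")
    // or the pull itself failed or hit the context deadline.
}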

View File

@ -17,6 +17,7 @@ limitations under the License.
package images package images
import ( import (
"context"
goerrors "errors" goerrors "errors"
"fmt" "fmt"
"math" "math"
@ -24,9 +25,9 @@ import (
"sync" "sync"
"time" "time"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
@ -41,7 +42,7 @@ import (
// collection. // collection.
type StatsProvider interface { type StatsProvider interface {
// ImageFsStats returns the stats of the image filesystem. // ImageFsStats returns the stats of the image filesystem.
ImageFsStats() (*statsapi.FsStats, error) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error)
} }
// ImageGCManager is an interface for managing lifecycle of all images. // ImageGCManager is an interface for managing lifecycle of all images.
@ -49,7 +50,7 @@ type StatsProvider interface {
type ImageGCManager interface { type ImageGCManager interface {
// Applies the garbage collection policy. Errors include being unable to free // Applies the garbage collection policy. Errors include being unable to free
// enough space as per the garbage collection policy. // enough space as per the garbage collection policy.
GarbageCollect() error GarbageCollect(ctx context.Context) error
// Start async garbage collection of images. // Start async garbage collection of images.
Start() Start()
@ -57,7 +58,7 @@ type ImageGCManager interface {
GetImageList() ([]container.Image, error) GetImageList() ([]container.Image, error)
// Delete all unused images. // Delete all unused images.
DeleteUnusedImages() error DeleteUnusedImages(ctx context.Context) error
} }
// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in // ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in
@ -178,13 +179,14 @@ func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, r
} }
func (im *realImageGCManager) Start() { func (im *realImageGCManager) Start() {
ctx := context.Background()
go wait.Until(func() { go wait.Until(func() {
// Initial detection makes the detected time "unknown" (in the past). // Initial detection makes the detected time "unknown" (in the past).
var ts time.Time var ts time.Time
if im.initialized { if im.initialized {
ts = time.Now() ts = time.Now()
} }
_, err := im.detectImages(ts) _, err := im.detectImages(ctx, ts)
if err != nil { if err != nil {
klog.InfoS("Failed to monitor images", "err", err) klog.InfoS("Failed to monitor images", "err", err)
} else { } else {
@ -194,7 +196,7 @@ func (im *realImageGCManager) Start() {
// Start a goroutine that periodically updates the image cache. // Start a goroutine that periodically updates the image cache.
go wait.Until(func() { go wait.Until(func() {
images, err := im.runtime.ListImages() images, err := im.runtime.ListImages(ctx)
if err != nil { if err != nil {
klog.InfoS("Failed to update image list", "err", err) klog.InfoS("Failed to update image list", "err", err)
} else { } else {
@ -209,20 +211,20 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
return im.imageCache.get(), nil return im.imageCache.get(), nil
} }
func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) { func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.String, error) {
imagesInUse := sets.NewString() imagesInUse := sets.NewString()
// Always consider the container runtime pod sandbox image in use // Always consider the container runtime pod sandbox image in use
imageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage}) imageRef, err := im.runtime.GetImageRef(ctx, container.ImageSpec{Image: im.sandboxImage})
if err == nil && imageRef != "" { if err == nil && imageRef != "" {
imagesInUse.Insert(imageRef) imagesInUse.Insert(imageRef)
} }
images, err := im.runtime.ListImages() images, err := im.runtime.ListImages(ctx)
if err != nil { if err != nil {
return imagesInUse, err return imagesInUse, err
} }
pods, err := im.runtime.GetPods(true) pods, err := im.runtime.GetPods(ctx, true)
if err != nil { if err != nil {
return imagesInUse, err return imagesInUse, err
} }
@ -276,9 +278,9 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e
return imagesInUse, nil return imagesInUse, nil
} }
func (im *realImageGCManager) GarbageCollect() error { func (im *realImageGCManager) GarbageCollect(ctx context.Context) error {
// Get disk usage on disk holding images. // Get disk usage on disk holding images.
fsStats, err := im.statsProvider.ImageFsStats() fsStats, err := im.statsProvider.ImageFsStats(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -308,7 +310,7 @@ func (im *realImageGCManager) GarbageCollect() error {
if usagePercent >= im.policy.HighThresholdPercent { if usagePercent >= im.policy.HighThresholdPercent {
amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent) klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent)
freed, err := im.freeSpace(amountToFree, time.Now()) freed, err := im.freeSpace(ctx, amountToFree, time.Now())
if err != nil { if err != nil {
return err return err
} }
@ -323,9 +325,9 @@ func (im *realImageGCManager) GarbageCollect() error {
return nil return nil
} }
func (im *realImageGCManager) DeleteUnusedImages() error { func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error {
klog.InfoS("Attempting to delete unused images") klog.InfoS("Attempting to delete unused images")
_, err := im.freeSpace(math.MaxInt64, time.Now()) _, err := im.freeSpace(ctx, math.MaxInt64, time.Now())
return err return err
} }
@ -335,8 +337,8 @@ func (im *realImageGCManager) DeleteUnusedImages() error {
// bytes freed is always returned. // bytes freed is always returned.
// Note that error may be nil and the number of bytes freed may be less // Note that error may be nil and the number of bytes freed may be less
// than bytesToFree. // than bytesToFree.
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) { func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time) (int64, error) {
imagesInUse, err := im.detectImages(freeTime) imagesInUse, err := im.detectImages(ctx, freeTime)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -385,7 +387,7 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (
// Remove image. Continue despite errors. // Remove image. Continue despite errors.
klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size) klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size)
err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id}) err := im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id})
if err != nil { if err != nil {
deletionErrors = append(deletionErrors, err) deletionErrors = append(deletionErrors, err)
continue continue
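The removal loop collects per-image failures and continues rather than aborting. A condensed sketch of how the tail of freeSpace can report them, assuming the apimachinery errors.NewAggregate helper this file already imports: the freed byte count is always returned, alongside one aggregate error.

package images

import (
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// summarizeDeletions sketches how freeSpace surfaces partial failure: the
// freed byte count is always returned, with per-image errors folded into one.
func summarizeDeletions(spaceFreed int64, deletionErrors []error) (int64, error) {
	if len(deletionErrors) > 0 {
		return spaceFreed, utilerrors.NewAggregate(deletionErrors)
	}
	return spaceFreed, nil
}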

View File

@ -17,6 +17,7 @@ limitations under the License.
package images package images
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
@ -91,6 +92,7 @@ func makeContainer(id int) *container.Container {
} }
func TestDetectImagesInitialDetect(t *testing.T) { func TestDetectImagesInitialDetect(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -119,7 +121,7 @@ func TestDetectImagesInitialDetect(t *testing.T) {
} }
startTime := time.Now().Add(-time.Millisecond) startTime := time.Now().Add(-time.Millisecond)
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 3) assert.Equal(manager.imageRecordsLen(), 3)
@ -138,6 +140,7 @@ func TestDetectImagesInitialDetect(t *testing.T) {
} }
func TestDetectImagesWithNewImage(t *testing.T) { func TestDetectImagesWithNewImage(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -156,7 +159,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
}}, }},
} }
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2) assert.Equal(manager.imageRecordsLen(), 2)
@ -170,7 +173,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
detectedTime := zero.Add(time.Second) detectedTime := zero.Add(time.Second)
startTime := time.Now().Add(-time.Millisecond) startTime := time.Now().Add(-time.Millisecond)
_, err = manager.detectImages(detectedTime) _, err = manager.detectImages(ctx, detectedTime)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 3) assert.Equal(manager.imageRecordsLen(), 3)
noContainer, ok := manager.getImageRecord(imageID(0)) noContainer, ok := manager.getImageRecord(imageID(0))
@ -188,6 +191,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
} }
func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) { func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -200,13 +204,14 @@ func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) {
}, },
} }
err := manager.DeleteUnusedImages() err := manager.DeleteUnusedImages(ctx)
assert := assert.New(t) assert := assert.New(t)
assert.Len(fakeRuntime.ImageList, 1) assert.Len(fakeRuntime.ImageList, 1)
require.NoError(t, err) require.NoError(t, err)
} }
func TestDeletePinnedImage(t *testing.T) { func TestDeletePinnedImage(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -223,13 +228,14 @@ func TestDeletePinnedImage(t *testing.T) {
}, },
} }
err := manager.DeleteUnusedImages() err := manager.DeleteUnusedImages(ctx)
assert := assert.New(t) assert := assert.New(t)
assert.Len(fakeRuntime.ImageList, 2) assert.Len(fakeRuntime.ImageList, 2)
require.NoError(t, err) require.NoError(t, err)
} }
func TestDoNotDeletePinnedImage(t *testing.T) { func TestDoNotDeletePinnedImage(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -246,7 +252,7 @@ func TestDoNotDeletePinnedImage(t *testing.T) {
}, },
} }
spaceFreed, err := manager.freeSpace(4096, time.Now()) spaceFreed, err := manager.freeSpace(ctx, 4096, time.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(1024, spaceFreed) assert.EqualValues(1024, spaceFreed)
@ -254,6 +260,7 @@ func TestDoNotDeletePinnedImage(t *testing.T) {
} }
func TestDeleteUnPinnedImage(t *testing.T) { func TestDeleteUnPinnedImage(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -270,7 +277,7 @@ func TestDeleteUnPinnedImage(t *testing.T) {
}, },
} }
spaceFreed, err := manager.freeSpace(2048, time.Now()) spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(2048, spaceFreed) assert.EqualValues(2048, spaceFreed)
@ -278,6 +285,7 @@ func TestDeleteUnPinnedImage(t *testing.T) {
} }
func TestAllPinnedImages(t *testing.T) { func TestAllPinnedImages(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -295,7 +303,7 @@ func TestAllPinnedImages(t *testing.T) {
}, },
} }
spaceFreed, err := manager.freeSpace(2048, time.Now()) spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(0, spaceFreed) assert.EqualValues(0, spaceFreed)
@ -303,6 +311,7 @@ func TestAllPinnedImages(t *testing.T) {
} }
func TestDetectImagesContainerStopped(t *testing.T) { func TestDetectImagesContainerStopped(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -320,7 +329,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
}}, }},
} }
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2) assert.Equal(manager.imageRecordsLen(), 2)
@ -329,7 +338,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
// Simulate container being stopped. // Simulate container being stopped.
fakeRuntime.AllPodList = []*containertest.FakePod{} fakeRuntime.AllPodList = []*containertest.FakePod{}
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2) assert.Equal(manager.imageRecordsLen(), 2)
container1, ok := manager.getImageRecord(imageID(0)) container1, ok := manager.getImageRecord(imageID(0))
@ -343,6 +352,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
} }
func TestDetectImagesWithRemovedImages(t *testing.T) { func TestDetectImagesWithRemovedImages(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -360,19 +370,20 @@ func TestDetectImagesWithRemovedImages(t *testing.T) {
}}, }},
} }
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 2) assert.Equal(manager.imageRecordsLen(), 2)
// Simulate both images being removed. // Simulate both images being removed.
fakeRuntime.ImageList = []container.Image{} fakeRuntime.ImageList = []container.Image{}
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
assert.Equal(manager.imageRecordsLen(), 0) assert.Equal(manager.imageRecordsLen(), 0)
} }
func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -390,7 +401,7 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
}}, }},
} }
spaceFreed, err := manager.freeSpace(2048, time.Now()) spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(1024, spaceFreed) assert.EqualValues(1024, spaceFreed)
@ -398,6 +409,7 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
} }
func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) { func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -416,13 +428,14 @@ func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
}}, }},
} }
err := manager.DeleteUnusedImages() err := manager.DeleteUnusedImages(ctx)
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.Len(fakeRuntime.ImageList, 1) assert.Len(fakeRuntime.ImageList, 1)
} }
func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -442,7 +455,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
} }
// Make 1 be more recently used than 0. // Make 1 be more recently used than 0.
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
require.NoError(t, err) require.NoError(t, err)
fakeRuntime.AllPodList = []*containertest.FakePod{ fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{ {Pod: &container.Pod{
@ -451,20 +464,20 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
}, },
}}, }},
} }
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
fakeRuntime.AllPodList = []*containertest.FakePod{ fakeRuntime.AllPodList = []*containertest.FakePod{
{Pod: &container.Pod{ {Pod: &container.Pod{
Containers: []*container.Container{}, Containers: []*container.Container{},
}}, }},
} }
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, manager.imageRecordsLen(), 2) require.Equal(t, manager.imageRecordsLen(), 2)
// We're setting the delete time one minute in the future, so the time the image // We're setting the delete time one minute in the future, so the time the image
// was first detected and the delete time are different. // was first detected and the delete time are different.
spaceFreed, err := manager.freeSpace(1024, time.Now().Add(time.Minute)) spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now().Add(time.Minute))
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(1024, spaceFreed) assert.EqualValues(1024, spaceFreed)
@ -472,6 +485,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
} }
func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
@ -489,20 +503,20 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
} }
// Make 1 more recently detected but used at the same time as 0. // Make 1 more recently detected but used at the same time as 0.
_, err := manager.detectImages(zero) _, err := manager.detectImages(ctx, zero)
require.NoError(t, err) require.NoError(t, err)
fakeRuntime.ImageList = []container.Image{ fakeRuntime.ImageList = []container.Image{
makeImage(0, 1024), makeImage(0, 1024),
makeImage(1, 2048), makeImage(1, 2048),
} }
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
fakeRuntime.AllPodList = []*containertest.FakePod{} fakeRuntime.AllPodList = []*containertest.FakePod{}
_, err = manager.detectImages(time.Now()) _, err = manager.detectImages(ctx, time.Now())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, manager.imageRecordsLen(), 2) require.Equal(t, manager.imageRecordsLen(), 2)
spaceFreed, err := manager.freeSpace(1024, time.Now()) spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(2048, spaceFreed) assert.EqualValues(2048, spaceFreed)
@ -510,6 +524,7 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
} }
func TestGarbageCollectBelowLowThreshold(t *testing.T) { func TestGarbageCollectBelowLowThreshold(t *testing.T) {
ctx := context.Background()
policy := ImageGCPolicy{ policy := ImageGCPolicy{
HighThresholdPercent: 90, HighThresholdPercent: 90,
LowThresholdPercent: 80, LowThresholdPercent: 80,
@ -520,15 +535,16 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) {
manager, _ := newRealImageGCManager(policy, mockStatsProvider) manager, _ := newRealImageGCManager(policy, mockStatsProvider)
// Expect 40% usage. // Expect 40% usage.
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(600), AvailableBytes: uint64Ptr(600),
CapacityBytes: uint64Ptr(1000), CapacityBytes: uint64Ptr(1000),
}, nil) }, nil)
assert.NoError(t, manager.GarbageCollect()) assert.NoError(t, manager.GarbageCollect(ctx))
} }
func TestGarbageCollectCadvisorFailure(t *testing.T) { func TestGarbageCollectCadvisorFailure(t *testing.T) {
ctx := context.Background()
policy := ImageGCPolicy{ policy := ImageGCPolicy{
HighThresholdPercent: 90, HighThresholdPercent: 90,
LowThresholdPercent: 80, LowThresholdPercent: 80,
@ -538,11 +554,12 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
manager, _ := newRealImageGCManager(policy, mockStatsProvider) manager, _ := newRealImageGCManager(policy, mockStatsProvider)
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{}, fmt.Errorf("error")) mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{}, fmt.Errorf("error"))
assert.NotNil(t, manager.GarbageCollect()) assert.NotNil(t, manager.GarbageCollect(ctx))
} }
func TestGarbageCollectBelowSuccess(t *testing.T) { func TestGarbageCollectBelowSuccess(t *testing.T) {
ctx := context.Background()
policy := ImageGCPolicy{ policy := ImageGCPolicy{
HighThresholdPercent: 90, HighThresholdPercent: 90,
LowThresholdPercent: 80, LowThresholdPercent: 80,
@ -554,7 +571,7 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider) manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider)
// Expect 95% usage and most of it gets freed. // Expect 95% usage and most of it gets freed.
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(50), AvailableBytes: uint64Ptr(50),
CapacityBytes: uint64Ptr(1000), CapacityBytes: uint64Ptr(1000),
}, nil) }, nil)
@ -562,10 +579,11 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
makeImage(0, 450), makeImage(0, 450),
} }
assert.NoError(t, manager.GarbageCollect()) assert.NoError(t, manager.GarbageCollect(ctx))
} }
func TestGarbageCollectNotEnoughFreed(t *testing.T) { func TestGarbageCollectNotEnoughFreed(t *testing.T) {
ctx := context.Background()
policy := ImageGCPolicy{ policy := ImageGCPolicy{
HighThresholdPercent: 90, HighThresholdPercent: 90,
LowThresholdPercent: 80, LowThresholdPercent: 80,
@ -576,7 +594,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider) manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider)
// Expect 95% usage and little of it gets freed. // Expect 95% usage and little of it gets freed.
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
AvailableBytes: uint64Ptr(50), AvailableBytes: uint64Ptr(50),
CapacityBytes: uint64Ptr(1000), CapacityBytes: uint64Ptr(1000),
}, nil) }, nil)
@ -584,10 +602,11 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
makeImage(0, 50), makeImage(0, 50),
} }
assert.NotNil(t, manager.GarbageCollect()) assert.NotNil(t, manager.GarbageCollect(ctx))
} }
func TestGarbageCollectImageNotOldEnough(t *testing.T) { func TestGarbageCollectImageNotOldEnough(t *testing.T) {
ctx := context.Background()
policy := ImageGCPolicy{ policy := ImageGCPolicy{
HighThresholdPercent: 90, HighThresholdPercent: 90,
LowThresholdPercent: 80, LowThresholdPercent: 80,
@ -620,11 +639,11 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
fakeClock := testingclock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
t.Log(fakeClock.Now()) t.Log(fakeClock.Now())
_, err := manager.detectImages(fakeClock.Now()) _, err := manager.detectImages(ctx, fakeClock.Now())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, manager.imageRecordsLen(), 2) require.Equal(t, manager.imageRecordsLen(), 2)
// no space freed since one image is in use, and another one is not old enough // no space freed since one image is in use, and another one is not old enough
spaceFreed, err := manager.freeSpace(1024, fakeClock.Now()) spaceFreed, err := manager.freeSpace(ctx, 1024, fakeClock.Now())
assert := assert.New(t) assert := assert.New(t)
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(0, spaceFreed) assert.EqualValues(0, spaceFreed)
@ -632,7 +651,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
// move clock by minAge duration, then 1 image will be garbage collected // move clock by minAge duration, then 1 image will be garbage collected
fakeClock.Step(policy.MinAge) fakeClock.Step(policy.MinAge)
spaceFreed, err = manager.freeSpace(1024, fakeClock.Now()) spaceFreed, err = manager.freeSpace(ctx, 1024, fakeClock.Now())
require.NoError(t, err) require.NoError(t, err)
assert.EqualValues(1024, spaceFreed) assert.EqualValues(1024, spaceFreed)
assert.Len(fakeRuntime.ImageList, 1) assert.Len(fakeRuntime.ImageList, 1)

View File

@ -17,6 +17,7 @@ limitations under the License.
package images package images
import ( import (
"context"
"fmt" "fmt"
"time" "time"
@ -86,7 +87,7 @@ func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix,
// EnsureImageExists pulls the image for the specified pod and container, and returns // EnsureImageExists pulls the image for the specified pod and container, and returns
// (imageRef, error message, error). // (imageRef, error message, error).
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) { func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) {
logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Image) logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Image)
ref, err := kubecontainer.GenerateContainerRef(pod, container) ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil { if err != nil {
@ -113,7 +114,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
Image: image, Image: image,
Annotations: podAnnotations, Annotations: podAnnotations,
} }
imageRef, err := m.imageService.GetImageRef(spec) imageRef, err := m.imageService.GetImageRef(ctx, spec)
if err != nil { if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
@ -141,7 +142,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", container.Image), klog.Info) m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", container.Image), klog.Info)
startTime := time.Now() startTime := time.Now()
pullChan := make(chan pullResult) pullChan := make(chan pullResult)
m.puller.pullImage(spec, pullSecrets, pullChan, podSandboxConfig) m.puller.pullImage(ctx, spec, pullSecrets, pullChan, podSandboxConfig)
imagePullResult := <-pullChan imagePullResult := <-pullChan
if imagePullResult.err != nil { if imagePullResult.err != nil {
m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning) m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning)
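EnsureImageExists hands ctx to pullImage and then blocks receiving on pullChan. A hedged variant, not in this commit, would also watch ctx.Done() so a cancelled caller stops waiting:

package images

import "context"

// awaitPull sketches a context-aware receive on pullChan: return the pull
// result, or give up when the caller's context ends. The underlying pull
// may still complete in the background.
func awaitPull(ctx context.Context, pullChan <-chan pullResult) (pullResult, error) {
	select {
	case res := <-pullChan:
		return res, nil
	case <-ctx.Done():
		return pullResult{}, ctx.Err()
	}
}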

View File

@ -17,6 +17,7 @@ limitations under the License.
package images package images
import ( import (
"context"
"errors" "errors"
"testing" "testing"
"time" "time"
@ -196,10 +197,11 @@ func TestParallelPuller(t *testing.T) {
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv) puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv)
t.Run(c.testName, func(t *testing.T) { t.Run(c.testName, func(t *testing.T) {
ctx := context.Background()
for _, expected := range c.expected { for _, expected := range c.expected {
fakeRuntime.CalledFunctions = nil fakeRuntime.CalledFunctions = nil
fakeClock.Step(time.Second) fakeClock.Step(time.Second)
_, _, err := puller.EnsureImageExists(pod, container, nil, nil) _, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
fakeRuntime.AssertCalls(expected.calls) fakeRuntime.AssertCalls(expected.calls)
assert.Equal(t, expected.err, err) assert.Equal(t, expected.err, err)
} }
@ -223,10 +225,11 @@ func TestSerializedPuller(t *testing.T) {
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv) puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv)
t.Run(c.testName, func(t *testing.T) { t.Run(c.testName, func(t *testing.T) {
ctx := context.Background()
for _, expected := range c.expected { for _, expected := range c.expected {
fakeRuntime.CalledFunctions = nil fakeRuntime.CalledFunctions = nil
fakeClock.Step(time.Second) fakeClock.Step(time.Second)
_, _, err := puller.EnsureImageExists(pod, container, nil, nil) _, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
fakeRuntime.AssertCalls(expected.calls) fakeRuntime.AssertCalls(expected.calls)
assert.Equal(t, expected.err, err) assert.Equal(t, expected.err, err)
} }
@ -283,11 +286,12 @@ func TestPullAndListImageWithPodAnnotations(t *testing.T) {
fakeClock.Step(time.Second) fakeClock.Step(time.Second)
t.Run(c.testName, func(t *testing.T) { t.Run(c.testName, func(t *testing.T) {
_, _, err := puller.EnsureImageExists(pod, container, nil, nil) ctx := context.Background()
_, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
fakeRuntime.AssertCalls(c.expected[0].calls) fakeRuntime.AssertCalls(c.expected[0].calls)
assert.Equal(t, c.expected[0].err, err, "tick=%d", 0) assert.Equal(t, c.expected[0].err, err, "tick=%d", 0)
images, _ := fakeRuntime.ListImages() images, _ := fakeRuntime.ListImages(ctx)
assert.Equal(t, 1, len(images), "ListImages() count") assert.Equal(t, 1, len(images), "ListImages() count")
image := images[0] image := images[0]

View File

@ -17,9 +17,10 @@ limitations under the License.
package images package images
import ( import (
"context"
"time" "time"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -31,7 +32,7 @@ type pullResult struct {
} }
type imagePuller interface { type imagePuller interface {
pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig) pullImage(context.Context, kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig)
} }
var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{} var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}
@ -44,9 +45,9 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller
return &parallelImagePuller{imageService} return &parallelImagePuller{imageService}
} }
func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { func (pip *parallelImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
go func() { go func() {
imageRef, err := pip.imageService.PullImage(spec, pullSecrets, podSandboxConfig) imageRef, err := pip.imageService.PullImage(ctx, spec, pullSecrets, podSandboxConfig)
pullChan <- pullResult{ pullChan <- pullResult{
imageRef: imageRef, imageRef: imageRef,
err: err, err: err,
@ -69,14 +70,16 @@ func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
} }
type imagePullRequest struct { type imagePullRequest struct {
ctx context.Context
spec kubecontainer.ImageSpec spec kubecontainer.ImageSpec
pullSecrets []v1.Secret pullSecrets []v1.Secret
pullChan chan<- pullResult pullChan chan<- pullResult
podSandboxConfig *runtimeapi.PodSandboxConfig podSandboxConfig *runtimeapi.PodSandboxConfig
} }
func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { func (sip *serialImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
sip.pullRequests <- &imagePullRequest{ sip.pullRequests <- &imagePullRequest{
ctx: ctx,
spec: spec, spec: spec,
pullSecrets: pullSecrets, pullSecrets: pullSecrets,
pullChan: pullChan, pullChan: pullChan,
@ -86,7 +89,7 @@ func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecret
func (sip *serialImagePuller) processImagePullRequests() { func (sip *serialImagePuller) processImagePullRequests() {
for pullRequest := range sip.pullRequests { for pullRequest := range sip.pullRequests {
imageRef, err := sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig) imageRef, err := sip.imageService.PullImage(pullRequest.ctx, pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig)
pullRequest.pullChan <- pullResult{ pullRequest.pullChan <- pullResult{
imageRef: imageRef, imageRef: imageRef,
err: err, err: err,
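Storing ctx in imagePullRequest matters because the serial worker runs detached from every caller. A sketch of a refinement this commit does not make: answer queued requests whose context has already ended instead of pulling on their behalf.

package images

// processQueuedPulls is a sketch: skip requests whose caller cancelled while
// the request sat in the queue, using the context stored in each
// imagePullRequest.
func (sip *serialImagePuller) processQueuedPulls() {
	for pullRequest := range sip.pullRequests {
		if err := pullRequest.ctx.Err(); err != nil {
			// The caller timed out or went away while queued; answer
			// immediately rather than pulling on its behalf.
			pullRequest.pullChan <- pullResult{err: err}
			continue
		}
		imageRef, err := sip.imageService.PullImage(pullRequest.ctx, pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig)
		pullRequest.pullChan <- pullResult{imageRef: imageRef, err: err}
	}
}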

View File

@ -17,9 +17,10 @@ limitations under the License.
package images package images
import ( import (
"context"
"errors" "errors"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
) )
@ -50,7 +51,7 @@ var (
// Implementations are expected to be thread safe. // Implementations are expected to be thread safe.
type ImageManager interface { type ImageManager interface {
// EnsureImageExists ensures that image specified in `container` exists. // EnsureImageExists ensures that image specified in `container` exists.
EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error)
// TODO(ronl): consolidate image managing and deleting operations in this interface // TODO(ronl): consolidate image managing and deleting operations in this interface
} }
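A caller-side sketch of the new signature (names illustrative), tying an image pull to the lifetime of the sync that needs it:

package images

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// ensureForContainer sketches driving ImageManager with the caller's
// context; the returned message is folded into the error for brevity.
func ensureForContainer(ctx context.Context, m ImageManager, pod *v1.Pod, c *v1.Container, secrets []v1.Secret) (string, error) {
	imageRef, msg, err := m.EnsureImageExists(ctx, pod, c, secrets, nil)
	if err != nil {
		return "", fmt.Errorf("%s: %w", msg, err)
	}
	return imageRef, nil
}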

View File

@ -197,7 +197,7 @@ type SyncHandler interface {
HandlePodRemoves(pods []*v1.Pod) HandlePodRemoves(pods []*v1.Pod)
HandlePodReconcile(pods []*v1.Pod) HandlePodReconcile(pods []*v1.Pod)
HandlePodSyncs(pods []*v1.Pod) HandlePodSyncs(pods []*v1.Pod)
HandlePodCleanups() error HandlePodCleanups(ctx context.Context) error
} }
// Option is a functional option type for Kubelet // Option is a functional option type for Kubelet
@ -339,6 +339,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeStatusMaxImages int32, nodeStatusMaxImages int32,
seccompDefault bool, seccompDefault bool,
) (*Kubelet, error) { ) (*Kubelet, error) {
ctx := context.Background()
logger := klog.TODO() logger := klog.TODO()
if rootDirectory == "" { if rootDirectory == "" {
@ -699,7 +700,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{}) klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy) klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil { if _, err := klet.updatePodCIDR(ctx, kubeCfg.PodCIDR); err != nil {
klog.ErrorS(err, "Pod CIDR update failed") klog.ErrorS(err, "Pod CIDR update failed")
} }
@ -1120,7 +1121,7 @@ type Kubelet struct {
clock clock.WithTicker clock clock.WithTicker
// handlers called during the tryUpdateNodeStatus cycle // handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*v1.Node) error setNodeStatusFuncs []func(context.Context, *v1.Node) error
lastNodeUnschedulableLock sync.Mutex lastNodeUnschedulableLock sync.Mutex
// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus() // maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
@ -1196,23 +1197,23 @@ type Kubelet struct {
} }
// ListPodStats is delegated to StatsProvider, which implements stats.Provider interface // ListPodStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStats() ([]statsapi.PodStats, error) { func (kl *Kubelet) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStats() return kl.StatsProvider.ListPodStats(ctx)
} }
// ListPodCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface // ListPodCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { func (kl *Kubelet) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodCPUAndMemoryStats() return kl.StatsProvider.ListPodCPUAndMemoryStats(ctx)
} }
// ListPodStatsAndUpdateCPUNanoCoreUsage is delegated to StatsProvider, which implements stats.Provider interface // ListPodStatsAndUpdateCPUNanoCoreUsage is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage() return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
} }
// ImageFsStats is delegated to StatsProvider, which implements stats.Provider interface // ImageFsStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ImageFsStats() (*statsapi.FsStats, error) { func (kl *Kubelet) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) {
return kl.StatsProvider.ImageFsStats() return kl.StatsProvider.ImageFsStats(ctx)
} }
// GetCgroupStats is delegated to StatsProvider, which implements stats.Provider interface // GetCgroupStats is delegated to StatsProvider, which implements stats.Provider interface
@ -1231,8 +1232,8 @@ func (kl *Kubelet) RootFsStats() (*statsapi.FsStats, error) {
} }
// GetContainerInfo is delegated to StatsProvider, which implements stats.Provider interface // GetContainerInfo is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { func (kl *Kubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
return kl.StatsProvider.GetContainerInfo(podFullName, uid, containerName, req) return kl.StatsProvider.GetContainerInfo(ctx, podFullName, uid, containerName, req)
} }
// GetRawContainerInfo is delegated to StatsProvider, which implements stats.Provider interface // GetRawContainerInfo is delegated to StatsProvider, which implements stats.Provider interface
@ -1295,7 +1296,8 @@ func (kl *Kubelet) setupDataDirs() error {
func (kl *Kubelet) StartGarbageCollection() { func (kl *Kubelet) StartGarbageCollection() {
loggedContainerGCFailure := false loggedContainerGCFailure := false
go wait.Until(func() { go wait.Until(func() {
if err := kl.containerGC.GarbageCollect(); err != nil { ctx := context.Background()
if err := kl.containerGC.GarbageCollect(ctx); err != nil {
klog.ErrorS(err, "Container garbage collection failed") klog.ErrorS(err, "Container garbage collection failed")
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error()) kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
loggedContainerGCFailure = true loggedContainerGCFailure = true
@ -1318,7 +1320,8 @@ func (kl *Kubelet) StartGarbageCollection() {
prevImageGCFailed := false prevImageGCFailed := false
go wait.Until(func() { go wait.Until(func() {
if err := kl.imageManager.GarbageCollect(); err != nil { ctx := context.Background()
if err := kl.imageManager.GarbageCollect(ctx); err != nil {
if prevImageGCFailed { if prevImageGCFailed {
klog.ErrorS(err, "Image garbage collection failed multiple times in a row") klog.ErrorS(err, "Image garbage collection failed multiple times in a row")
// Only create an event for repeated failures // Only create an event for repeated failures
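Each garbage-collection tick above mints a fresh context.Background(). A hedged refinement (illustrative, not in this commit) gives every pass its own deadline so one stuck CRI call cannot pin the loop across periods:

package kubelet

import (
	"context"
	"time"
)

// gcPass sketches deriving a per-iteration deadline before calling a
// context-aware collector; the budget is illustrative.
func gcPass(collect func(context.Context) error, budget time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), budget)
	defer cancel()
	return collect(ctx)
}

Inside the wait.Until closure this would read, for example, gcPass(kl.containerGC.GarbageCollect, time.Minute).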
@ -1430,6 +1433,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
// Run starts the kubelet reacting to config updates // Run starts the kubelet reacting to config updates
func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
ctx := context.Background()
if kl.logServer == nil { if kl.logServer == nil {
kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/"))) kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/")))
} }
@ -1478,7 +1482,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// Start the pod lifecycle event generator. // Start the pod lifecycle event generator.
kl.pleg.Start() kl.pleg.Start()
kl.syncLoop(updates, kl) kl.syncLoop(ctx, updates, kl)
} }
// syncPod is the transaction script for the sync of a single pod (setting up) // syncPod is the transaction script for the sync of a single pod (setting up)
@ -1616,7 +1620,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
klog.V(2).InfoS("Pod is not runnable and must have running containers stopped", "pod", klog.KObj(pod), "podUID", pod.UID, "message", runnable.Message) klog.V(2).InfoS("Pod is not runnable and must have running containers stopped", "pod", klog.KObj(pod), "podUID", pod.UID, "message", runnable.Message)
var syncErr error var syncErr error
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, nil); err != nil { if err := kl.killPod(ctx, pod, p, nil); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
syncErr = fmt.Errorf("error killing pod: %v", err) syncErr = fmt.Errorf("error killing pod: %v", err)
utilruntime.HandleError(syncErr) utilruntime.HandleError(syncErr)
@ -1668,7 +1672,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
podKilled := false podKilled := false
if !pcm.Exists(pod) && !firstSync { if !pcm.Exists(pod) && !firstSync {
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, nil); err == nil { if err := kl.killPod(ctx, pod, p, nil); err == nil {
podKilled = true podKilled = true
} else { } else {
klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus) klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus)
@ -1750,7 +1754,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
kl.probeManager.AddPod(pod) kl.probeManager.AddPod(pod)
// Call the container runtime's SyncPod callback // Call the container runtime's SyncPod callback
result := kl.containerRuntime.SyncPod(pod, podStatus, pullSecrets, kl.backOff) result := kl.containerRuntime.SyncPod(ctx, pod, podStatus, pullSecrets, kl.backOff)
kl.reasonCache.Update(pod.UID, result) kl.reasonCache.Update(pod.UID, result)
if err := result.Error(); err != nil { if err := result.Error(); err != nil {
// Do not return error if the only failures were pods in backoff // Do not return error if the only failures were pods in backoff
@ -1785,7 +1789,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
} else { } else {
klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil) klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil)
} }
if err := kl.killPod(pod, *runningPod, gracePeriod); err != nil { if err := kl.killPod(ctx, pod, *runningPod, gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly // there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err) utilruntime.HandleError(err)
@ -1810,7 +1814,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
kl.probeManager.StopLivenessAndStartup(pod) kl.probeManager.StopLivenessAndStartup(pod)
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, gracePeriod); err != nil { if err := kl.killPod(ctx, pod, p, gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly // there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err) utilruntime.HandleError(err)
@ -1828,7 +1832,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
// catch race conditions introduced by callers updating pod status out of order. // catch race conditions introduced by callers updating pod status out of order.
// TODO: have KillPod return the terminal status of stopped containers and write that into the // TODO: have KillPod return the terminal status of stopped containers and write that into the
// cache immediately // cache immediately
podStatus, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil { if err != nil {
klog.ErrorS(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID) klog.ErrorS(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID)
return err return err
@ -2016,7 +2020,7 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
// any new change seen, will run a sync against desired state and running state. If // any new change seen, will run a sync against desired state and running state. If
// no changes are seen to the configuration, will synchronize the last known desired // no changes are seen to the configuration, will synchronize the last known desired
// state every sync-frequency seconds. Never returns. // state every sync-frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) { func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
klog.InfoS("Starting kubelet main sync loop") klog.InfoS("Starting kubelet main sync loop")
// The syncTicker wakes up kubelet to checks if there are any pod workers // The syncTicker wakes up kubelet to checks if there are any pod workers
// that need to be sync'd. A one-second period is sufficient because the // that need to be sync'd. A one-second period is sufficient because the
@ -2051,7 +2055,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
duration = base duration = base
kl.syncLoopMonitor.Store(kl.clock.Now()) kl.syncLoopMonitor.Store(kl.clock.Now())
if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) { if !kl.syncLoopIteration(ctx, updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
break break
} }
kl.syncLoopMonitor.Store(kl.clock.Now()) kl.syncLoopMonitor.Store(kl.clock.Now())
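syncLoop now threads a context through each iteration, though Run still seeds it with context.Background(). A sketch of where that root context could eventually come from, assuming the standard library's signal.NotifyContext; this commit does not wire this up:

package kubelet

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// rootContext sketches deriving a top-level context from process signals,
// so cancellation could eventually flow from here through syncLoop to every
// CRI call. This commit still passes context.Background().
func rootContext() (context.Context, context.CancelFunc) {
	return signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
}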
@ -2090,7 +2094,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
// - housekeepingCh: trigger cleanup of pods // - housekeepingCh: trigger cleanup of pods
// - health manager: sync pods that have failed or in which one or more // - health manager: sync pods that have failed or in which one or more
// containers have failed health checks // containers have failed health checks
func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler, func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool { syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
select { select {
case u, open := <-configCh: case u, open := <-configCh:
@ -2186,7 +2190,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
} else { } else {
start := time.Now() start := time.Now()
klog.V(4).InfoS("SyncLoop (housekeeping)") klog.V(4).InfoS("SyncLoop (housekeeping)")
if err := handler.HandlePodCleanups(); err != nil { if err := handler.HandlePodCleanups(ctx); err != nil {
klog.ErrorS(err, "Failed cleaning pods") klog.ErrorS(err, "Failed cleaning pods")
} }
duration := time.Since(start) duration := time.Since(start)
@ -2360,8 +2364,9 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time {
func (kl *Kubelet) updateRuntimeUp() { func (kl *Kubelet) updateRuntimeUp() {
kl.updateRuntimeMux.Lock() kl.updateRuntimeMux.Lock()
defer kl.updateRuntimeMux.Unlock() defer kl.updateRuntimeMux.Unlock()
ctx := context.Background()
s, err := kl.containerRuntime.Status() s, err := kl.containerRuntime.Status(ctx)
if err != nil { if err != nil {
klog.ErrorS(err, "Container runtime sanity check failed") klog.ErrorS(err, "Container runtime sanity check failed")
return return
@ -2445,6 +2450,7 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str
// Function is executed only during Kubelet start, which improves latency to a ready node by updating // Function is executed only during Kubelet start, which improves latency to a ready node by updating
// pod CIDR, runtime status, and node statuses ASAP. // pod CIDR, runtime status, and node statuses ASAP.
func (kl *Kubelet) fastStatusUpdateOnce() { func (kl *Kubelet) fastStatusUpdateOnce() {
ctx := context.Background()
for { for {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
node, err := kl.GetNode() node, err := kl.GetNode()
@ -2454,7 +2460,7 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
} }
if len(node.Spec.PodCIDRs) != 0 { if len(node.Spec.PodCIDRs) != 0 {
podCIDRs := strings.Join(node.Spec.PodCIDRs, ",") podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
if _, err := kl.updatePodCIDR(podCIDRs); err != nil { if _, err := kl.updatePodCIDR(ctx, podCIDRs); err != nil {
klog.ErrorS(err, "Pod CIDR update failed", "CIDR", podCIDRs) klog.ErrorS(err, "Pod CIDR update failed", "CIDR", podCIDRs)
continue continue
} }
@ -2471,12 +2477,13 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
// engine will be asked to checkpoint the given container into the kubelet's default // engine will be asked to checkpoint the given container into the kubelet's default
// checkpoint directory. // checkpoint directory.
func (kl *Kubelet) CheckpointContainer( func (kl *Kubelet) CheckpointContainer(
ctx context.Context,
podUID types.UID, podUID types.UID,
podFullName, podFullName,
containerName string, containerName string,
options *runtimeapi.CheckpointContainerRequest, options *runtimeapi.CheckpointContainerRequest,
) error { ) error {
container, err := kl.findContainer(podFullName, podUID, containerName) container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil { if err != nil {
return err return err
} }
@ -2496,7 +2503,7 @@ func (kl *Kubelet) CheckpointContainer(
options.ContainerId = string(container.ID.ID) options.ContainerId = string(container.ID.ID)
if err := kl.containerRuntime.CheckpointContainer(options); err != nil { if err := kl.containerRuntime.CheckpointContainer(ctx, options); err != nil {
return err return err
} }

View File

@ -191,8 +191,8 @@ func (kl *Kubelet) GetPods() []*v1.Pod {
// container runtime cache. This function converts kubecontainer.Pod to // container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and // v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful. // v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) { func (kl *Kubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods() pods, err := kl.runtimeCache.GetPods(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -17,9 +17,10 @@ limitations under the License.
package kubelet package kubelet
import ( import (
"context"
"fmt" "fmt"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
@ -40,7 +41,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
// updatePodCIDR updates the pod CIDR in the runtime state if it is different // updatePodCIDR updates the pod CIDR in the runtime state if it is different
// from the current CIDR. Return true if pod CIDR is actually changed. // from the current CIDR. Return true if pod CIDR is actually changed.
func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) { func (kl *Kubelet) updatePodCIDR(ctx context.Context, cidr string) (bool, error) {
kl.updatePodCIDRMux.Lock() kl.updatePodCIDRMux.Lock()
defer kl.updatePodCIDRMux.Unlock() defer kl.updatePodCIDRMux.Unlock()
@ -52,7 +53,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {
// kubelet -> generic runtime -> runtime shim -> network plugin // kubelet -> generic runtime -> runtime shim -> network plugin
// docker/non-cri implementations have a passthrough UpdatePodCIDR // docker/non-cri implementations have a passthrough UpdatePodCIDR
if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil { if err := kl.getRuntime().UpdatePodCIDR(ctx, cidr); err != nil {
// If updatePodCIDR fails, the pod CIDR should in theory be unchanged. // If updatePodCIDR fails, the pod CIDR should in theory be unchanged.
// But it is safer to still return true here. // But it is safer to still return true here.
return true, fmt.Errorf("failed to update pod CIDR: %v", err) return true, fmt.Errorf("failed to update pod CIDR: %v", err)

View File

@ -424,7 +424,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
} }
} }
kl.setNodeStatus(node) kl.setNodeStatus(ctx, node)
return node, nil return node, nil
} }
@ -435,6 +435,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
func (kl *Kubelet) syncNodeStatus() { func (kl *Kubelet) syncNodeStatus() {
kl.syncNodeStatusMux.Lock() kl.syncNodeStatusMux.Lock()
defer kl.syncNodeStatusMux.Unlock() defer kl.syncNodeStatusMux.Unlock()
ctx := context.Background()
if kl.kubeClient == nil || kl.heartbeatClient == nil { if kl.kubeClient == nil || kl.heartbeatClient == nil {
return return
@@ -443,17 +444,17 @@ func (kl *Kubelet) syncNodeStatus() {
 		// This will exit immediately if it doesn't need to do anything.
 		kl.registerWithAPIServer()
 	}
-	if err := kl.updateNodeStatus(); err != nil {
+	if err := kl.updateNodeStatus(ctx); err != nil {
 		klog.ErrorS(err, "Unable to update node status")
 	}
 }
 
 // updateNodeStatus updates node status to master with retries if there is any
 // change or enough time passed from the last sync.
-func (kl *Kubelet) updateNodeStatus() error {
+func (kl *Kubelet) updateNodeStatus(ctx context.Context) error {
 	klog.V(5).InfoS("Updating node status")
 	for i := 0; i < nodeStatusUpdateRetry; i++ {
-		if err := kl.tryUpdateNodeStatus(i); err != nil {
+		if err := kl.tryUpdateNodeStatus(ctx, i); err != nil {
 			if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
 				kl.onRepeatedHeartbeatFailure()
 			}
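The retry loop keeps its shape; only the signature changes, so one root context covers all attempts. A runnable sketch of that shape, where tryUpdate is a stand-in for tryUpdateNodeStatus:

package main

import (
	"context"
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5

// updateNodeStatus reuses the same ctx across attempts, so a caller-side
// cancellation stops the loop's underlying API calls.
func updateNodeStatus(ctx context.Context, tryUpdate func(context.Context, int) error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdate(ctx, i); err != nil {
			continue // next attempt, mirroring the kubelet's retry-on-error
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	err := updateNodeStatus(context.Background(), func(ctx context.Context, try int) error {
		if try < 2 {
			return fmt.Errorf("transient failure on try %d", try)
		}
		return nil
	})
	fmt.Println(err) // <nil>
}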
@@ -467,7 +468,7 @@ func (kl *Kubelet) updateNodeStatus() error {
 
 // tryUpdateNodeStatus tries to update node status to master if there is any
 // change or enough time passed from the last sync.
-func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
+func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error {
 	// In large clusters, GET and PUT operations on Node objects coming
 	// from here are the majority of load on apiserver and etcd.
 	// To reduce the load on etcd, we are serving GET operations from
@@ -478,7 +479,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 	if tryNumber == 0 {
 		util.FromApiserverCache(&opts)
 	}
-	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts)
+	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(ctx, string(kl.nodeName), opts)
 	if err != nil {
 		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
 	}
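The GET now carries the plumbed ctx instead of context.TODO(), so a cancelled caller aborts the in-flight request. A hedged sketch against client-go's fake clientset; the node name and the ResourceVersion="0" cache hint are illustrative, standing in for the heartbeat client and FromApiserverCache:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	opts := metav1.GetOptions{}
	opts.ResourceVersion = "0" // first try may be served from the apiserver cache

	node, err := client.CoreV1().Nodes().Get(ctx, "node-a", opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(node.Name)
}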
@@ -494,7 +495,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 		// node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is
 		// actually changed.
 		podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
-		if podCIDRChanged, err = kl.updatePodCIDR(podCIDRs); err != nil {
+		if podCIDRChanged, err = kl.updatePodCIDR(ctx, podCIDRs); err != nil {
 			klog.ErrorS(err, "Error updating pod CIDR")
 		}
 	}
@@ -518,7 +519,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 		areRequiredLabelsNotPresent = true
 	}
 
-	kl.setNodeStatus(node)
+	kl.setNodeStatus(ctx, node)
 
 	now := kl.clock.Now()
 	if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
@@ -570,7 +571,7 @@ func (kl *Kubelet) recordEvent(eventType, event, message string) {
 }
 
 // record if node schedulable change.
-func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
+func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node) error {
 	kl.lastNodeUnschedulableLock.Lock()
 	defer kl.lastNodeUnschedulableLock.Unlock()
 	if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
@@ -588,10 +589,10 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
 // any fields that are currently set.
 // TODO(madhusudancs): Simplify the logic for setting node conditions and
 // refactor the node status condition code out to a different file.
-func (kl *Kubelet) setNodeStatus(node *v1.Node) {
+func (kl *Kubelet) setNodeStatus(ctx context.Context, node *v1.Node) {
 	for i, f := range kl.setNodeStatusFuncs {
 		klog.V(5).InfoS("Setting node status condition code", "position", i, "node", klog.KObj(node))
-		if err := f(node); err != nil {
+		if err := f(ctx, node); err != nil {
 			klog.ErrorS(err, "Failed to set some node status fields", "node", klog.KObj(node))
 		}
 	}
@@ -610,7 +611,7 @@ func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {
 
 // defaultNodeStatusFuncs is a factory that generates the default set of
 // setNodeStatus funcs
-func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
+func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) error {
 	// if cloud is not nil, we expect the cloud resource sync manager to exist
 	var nodeAddressesFunc func() ([]v1.NodeAddress, error)
 	if kl.cloud != nil {
@@ -620,7 +621,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
 	if kl.appArmorValidator != nil {
 		validateHostFunc = kl.appArmorValidator.ValidateHost
 	}
-	var setters []func(n *v1.Node) error
+	var setters []func(ctx context.Context, n *v1.Node) error
 	setters = append(setters,
 		nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
 		nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
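Changing the setter slice to []func(context.Context, *v1.Node) error means every status setter receives the caller's context. A compact sketch of that shape with stand-in types (Node and setter are illustrative, not the real nodestatus setters):

package main

import (
	"context"
	"fmt"
)

// Node is a tiny stand-in for *v1.Node; only the setter shape matters.
type Node struct{ Addresses []string }

// setter mirrors the new signature: ctx first, then the node to mutate.
type setter func(ctx context.Context, n *Node) error

func setNodeStatus(ctx context.Context, n *Node, setters []setter) {
	for i, f := range setters {
		if err := f(ctx, n); err != nil {
			fmt.Printf("setter %d failed: %v\n", i, err)
		}
	}
}

func main() {
	setters := []setter{
		func(_ context.Context, n *Node) error {
			n.Addresses = append(n.Addresses, "10.0.0.1")
			return nil
		},
	}
	n := &Node{}
	setNodeStatus(context.Background(), n, setters)
	fmt.Println(n.Addresses)
}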

View File

@@ -177,6 +177,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.desc, func(t *testing.T) {
+			ctx := context.Background()
 			// generate one more in inputImageList than we configure the Kubelet to report,
 			// or 5 images if unlimited
 			numTestImages := int(tc.nodeStatusMaxImages) + 1
@@ -290,7 +291,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			}
 
 			kubelet.updateRuntimeUp()
-			assert.NoError(t, kubelet.updateNodeStatus())
+			assert.NoError(t, kubelet.updateNodeStatus(ctx))
 			actions := kubeClient.Actions()
 			require.Len(t, actions, 2)
 			require.True(t, actions[1].Matches("patch", "nodes"))
@@ -315,6 +316,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 }
 
 func TestUpdateExistingNodeStatus(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -478,7 +480,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	}
 
 	kubelet.updateRuntimeUp()
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 	actions := kubeClient.Actions()
 	assert.Len(t, actions, 2)
@@ -506,6 +508,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 }
 
 func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
+	ctx := context.Background()
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
@@ -559,7 +562,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
 	}
 
 	// should return an error, but not hang
-	assert.Error(t, kubelet.updateNodeStatus())
+	assert.Error(t, kubelet.updateNodeStatus(ctx))
 
 	// should have attempted multiple times
 	if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
@@ -572,6 +575,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
 }
 
 func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -681,13 +685,13 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
 		kubeClient.ClearActions()
-		assert.NoError(t, kubelet.updateNodeStatus())
+		assert.NoError(t, kubelet.updateNodeStatus(ctx))
 		actions := kubeClient.Actions()
 		require.Len(t, actions, 2)
 		require.True(t, actions[1].Matches("patch", "nodes"))
 		require.Equal(t, actions[1].GetSubresource(), "status")
 
-		updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{})
+		updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
 		require.NoError(t, err, "can't apply node status patch")
 
 		for i, cond := range updatedNode.Status.Conditions {
@@ -781,17 +785,19 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 }
 
 func TestUpdateNodeStatusError(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	// No matching node for the kubelet
 	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
-	assert.Error(t, kubelet.updateNodeStatus())
+	assert.Error(t, kubelet.updateNodeStatus(ctx))
 	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
 }
 
 func TestUpdateNodeStatusWithLease(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	clock := testKubelet.fakeClock
@@ -911,7 +917,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	// Update node status when node status is created.
 	// Report node status.
 	kubelet.updateRuntimeUp()
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 	actions := kubeClient.Actions()
 	assert.Len(t, actions, 2)
@@ -934,7 +940,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	// Update node status again when nothing is changed (except heartbeat time).
 	// Report node status if it has exceeded the duration of nodeStatusReportFrequency.
 	clock.Step(time.Minute)
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 	// 2 more action (There were 2 actions before).
 	actions = kubeClient.Actions()
@@ -959,7 +965,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	// Update node status again when nothing is changed (except heartbeat time).
 	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
 	clock.Step(10 * time.Second)
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 	// Only 1 more action (There were 4 actions before).
 	actions = kubeClient.Actions()
@@ -977,7 +983,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	newMachineInfo := oldMachineInfo.Clone()
 	newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity)
 	kubelet.setCachedMachineInfo(newMachineInfo)
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 	// 2 more action (There were 5 actions before).
 	actions = kubeClient.Actions()
@@ -1009,7 +1015,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	updatedNode.Spec.PodCIDR = podCIDRs[0]
 	updatedNode.Spec.PodCIDRs = podCIDRs
 	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
 	// 2 more action (There were 7 actions before).
 	actions = kubeClient.Actions()
@@ -1022,7 +1028,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
 	clock.Step(10 * time.Second)
 	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 	// Only 1 more action (There were 9 actions before).
 	actions = kubeClient.Actions()
 	assert.Len(t, actions, 10)
@@ -1078,6 +1084,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.desc, func(t *testing.T) {
+			ctx := context.Background()
 			// Setup
 			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 			defer testKubelet.Cleanup()
@@ -1094,7 +1101,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
 			kubelet.volumeManager = fakeVolumeManager
 
 			// Only test VolumesInUse setter
-			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
+			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
 				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
 					kubelet.volumeManager.GetVolumesInUse),
 			}
@@ -1103,7 +1110,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
 			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
 
 			// Execute
-			assert.NoError(t, kubelet.updateNodeStatus())
+			assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 			// Validate
 			actions := kubeClient.Actions()
@@ -1345,6 +1352,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 }
 
 func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
+	ctx := context.Background()
 	const nodeStatusMaxImages = 5
 
 	// generate one more in inputImageList than we configure the Kubelet to report
@@ -1403,7 +1411,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 	}
 
 	kubelet.updateRuntimeUp()
-	assert.NoError(t, kubelet.updateNodeStatus())
+	assert.NoError(t, kubelet.updateNodeStatus(ctx))
 	actions := kubeClient.Actions()
 	require.Len(t, actions, 2)
 	require.True(t, actions[1].Matches("patch", "nodes"))
@@ -2817,6 +2825,7 @@ func TestUpdateNodeAddresses(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.Name, func(t *testing.T) {
+			ctx := context.Background()
 			oldNode := &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
 				Spec:       v1.NodeSpec{},
@@ -2832,15 +2841,15 @@ func TestUpdateNodeAddresses(t *testing.T) {
 				},
 			}
 
-			_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{})
+			_, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{})
 			assert.NoError(t, err)
-			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
-				func(node *v1.Node) error {
+			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
+				func(_ context.Context, node *v1.Node) error {
 					node.Status.Addresses = expectedNode.Status.Addresses
 					return nil
 				},
 			}
 
-			assert.NoError(t, kubelet.updateNodeStatus())
+			assert.NoError(t, kubelet.updateNodeStatus(ctx))
 
 			actions := kubeClient.Actions()
 			lastAction := actions[len(actions)-1]
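Across these test hunks the recurring move is the same: each test or subtest mints its own root context and passes it explicitly, so helpers no longer reach for context.TODO(). A minimal sketch of that testing pattern (doWork is a hypothetical helper, not a kubelet function):

package demo

import (
	"context"
	"testing"
)

// Each subtest owns a root context; cancellation or deadlines can be
// layered on per-case without touching the helpers' signatures again.
func TestWithContext(t *testing.T) {
	cases := []string{"a", "b"}
	for _, name := range cases {
		t.Run(name, func(t *testing.T) {
			ctx := context.Background()
			if err := doWork(ctx, name); err != nil {
				t.Fatalf("doWork(%q): %v", name, err)
			}
		})
	}
}

func doWork(ctx context.Context, name string) error {
	return ctx.Err()
}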

View File

@@ -466,7 +466,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
 
 // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
 // the container runtime to set parameters for launching a container.
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
+func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
 	opts, err := kl.containerManager.GetResources(pod, container)
 	if err != nil {
 		return nil, nil, err
@@ -519,7 +519,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 	// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
 	if kl.experimentalHostUserNamespaceDefaulting {
-		opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
+		opts.EnableHostUserNamespace = kl.enableHostUserNamespace(ctx, pod)
 	}
 
 	return opts, cleanupAction, nil
@@ -854,9 +854,9 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
 // killPod instructs the container runtime to kill the pod. This method requires that
 // the pod status contains the result of the last syncPod, otherwise it may fail to
 // terminate newly created containers and sandboxes.
-func (kl *Kubelet) killPod(pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
+func (kl *Kubelet) killPod(ctx context.Context, pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
 	// Call the container runtime KillPod method which stops all known running containers of the pod
-	if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
+	if err := kl.containerRuntime.KillPod(ctx, pod, p, gracePeriodOverride); err != nil {
 		return err
 	}
 	if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
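killPod now forwards the caller's ctx to the runtime's KillPod, so the kill path can observe cancellation. A sketch under simplified, illustrative types (Pod and Runtime are stand-ins for the kubecontainer types):

package main

import (
	"context"
	"fmt"
)

type Pod struct{ Name string }

type Runtime interface {
	KillPod(ctx context.Context, pod Pod, gracePeriodOverride *int64) error
}

type fakeRuntime struct{}

func (fakeRuntime) KillPod(ctx context.Context, pod Pod, gracePeriodOverride *int64) error {
	if err := ctx.Err(); err != nil {
		return err // a cancelled caller aborts the kill path early
	}
	fmt.Println("killed", pod.Name)
	return nil
}

// killPod mirrors the diff: ctx in, ctx forwarded.
func killPod(ctx context.Context, rt Runtime, pod Pod, gracePeriodOverride *int64) error {
	return rt.KillPod(ctx, pod, gracePeriodOverride)
}

func main() {
	grace := int64(5)
	_ = killPod(context.Background(), fakeRuntime{}, Pod{Name: "web"}, &grace)
}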
@@ -1054,7 +1054,7 @@ func (kl *Kubelet) deleteOrphanedMirrorPods() {
 // is executing which means no new pods can appear.
 // NOTE: This function is executed by the main sync loop, so it
 // should not contain any blocking calls.
-func (kl *Kubelet) HandlePodCleanups() error {
+func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error {
 	// The kubelet lacks checkpointing, so we need to introspect the set of pods
 	// in the cgroup tree prior to inspecting the set of pods in our pod manager.
 	// this ensures our view of the cgroup tree does not mistakenly observe pods
@@ -1118,7 +1118,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
 
 	// Terminate any pods that are observed in the runtime but not
 	// present in the list of known running pods from config.
-	runningRuntimePods, err := kl.runtimeCache.GetPods()
+	runningRuntimePods, err := kl.runtimeCache.GetPods(ctx)
 	if err != nil {
 		klog.ErrorS(err, "Error listing containers")
 		return err
@@ -1156,7 +1156,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	// in the cache. We need to bypass the cache to get the latest set of
 	// running pods to clean up the volumes.
 	// TODO: Evaluate the performance impact of bypassing the runtime cache.
-	runningRuntimePods, err = kl.containerRuntime.GetPods(false)
+	runningRuntimePods, err = kl.containerRuntime.GetPods(ctx, false)
 	if err != nil {
 		klog.ErrorS(err, "Error listing containers")
 		return err
@@ -1876,8 +1876,8 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
 
 // findContainer finds and returns the container with the given pod ID, full name, and container name.
 // It returns nil if not found.
-func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
-	pods, err := kl.containerRuntime.GetPods(false)
+func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
+	pods, err := kl.containerRuntime.GetPods(ctx, false)
 	if err != nil {
 		return nil, err
 	}
@@ -1889,8 +1889,8 @@ func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, container
 }
 
 // RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
-func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
-	container, err := kl.findContainer(podFullName, podUID, containerName)
+func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
+	container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
 	if err != nil {
 		return nil, err
 	}
@@ -1898,24 +1898,24 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe
 		return nil, fmt.Errorf("container not found (%q)", containerName)
 	}
 	// TODO(tallclair): Pass a proper timeout value.
-	return kl.runner.RunInContainer(container.ID, cmd, 0)
+	return kl.runner.RunInContainer(ctx, container.ID, cmd, 0)
 }
 
 // GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
-func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
-	container, err := kl.findContainer(podFullName, podUID, containerName)
+func (kl *Kubelet) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
+	container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
 	if err != nil {
 		return nil, err
 	}
 	if container == nil {
 		return nil, fmt.Errorf("container not found (%q)", containerName)
 	}
-	return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
+	return kl.streamingRuntime.GetExec(ctx, container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
 }
 
 // GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
-func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
-	container, err := kl.findContainer(podFullName, podUID, containerName)
+func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
+	container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
 	if err != nil {
 		return nil, err
 	}
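findContainer is the shared lookup behind RunInContainer, GetExec, and GetAttach, so giving it a ctx makes all three cancellable end to end. A self-contained sketch with stand-in types (Container, Pod, and fakeRuntime are illustrative, not the kubecontainer types):

package main

import (
	"context"
	"fmt"
)

type Container struct{ Name string }

type Pod struct {
	FullName   string
	Containers []*Container
}

type runtime interface {
	GetPods(ctx context.Context, all bool) ([]*Pod, error)
}

type fakeRuntime struct{ pods []*Pod }

func (f fakeRuntime) GetPods(ctx context.Context, all bool) ([]*Pod, error) {
	return f.pods, ctx.Err()
}

// findContainer forwards ctx to the pod listing, mirroring the diff.
func findContainer(ctx context.Context, rt runtime, podFullName, containerName string) (*Container, error) {
	pods, err := rt.GetPods(ctx, false)
	if err != nil {
		return nil, err
	}
	for _, p := range pods {
		if p.FullName != podFullName {
			continue
		}
		for _, c := range p.Containers {
			if c.Name == containerName {
				return c, nil
			}
		}
	}
	return nil, fmt.Errorf("container not found (%q)", containerName)
}

func main() {
	rt := fakeRuntime{pods: []*Pod{{FullName: "web_default", Containers: []*Container{{Name: "app"}}}}}
	c, err := findContainer(context.Background(), rt, "web_default", "app")
	fmt.Println(c.Name, err)
}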
@@ -1936,12 +1936,12 @@ func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName
 	}
 	tty := containerSpec.TTY
-	return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
+	return kl.streamingRuntime.GetAttach(ctx, container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
 }
 
 // GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
-func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
-	pods, err := kl.containerRuntime.GetPods(false)
+func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
+	pods, err := kl.containerRuntime.GetPods(ctx, false)
 	if err != nil {
 		return nil, err
 	}
@@ -1954,7 +1954,7 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID
 		return nil, fmt.Errorf("pod not found (%q)", podFullName)
 	}
 
-	return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports)
+	return kl.streamingRuntime.GetPortForward(ctx, podName, podNamespace, podUID, portForwardOpts.Ports)
 }
 
 // cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
@@ -1995,9 +1995,9 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupP
 // NOTE: when if a container shares any namespace with another container it must also share the user namespace
 // or it will not have the correct capabilities in the namespace. This means that host user namespace
 // is enabled per pod, not per container.
-func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
+func (kl *Kubelet) enableHostUserNamespace(ctx context.Context, pod *v1.Pod) bool {
 	if kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
-		hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
+		hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(ctx, pod) {
 		return true
 	}
 	return false
@@ -2037,7 +2037,7 @@ func hasHostNamespace(pod *v1.Pod) bool {
 }
 
 // hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
-func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
+func (kl *Kubelet) hasHostMountPVC(ctx context.Context, pod *v1.Pod) bool {
 	for _, volume := range pod.Spec.Volumes {
 		pvcName := ""
 		switch {
@@ -2048,13 +2048,13 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
 		default:
 			continue
 		}
-		pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
+		pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
 		if err != nil {
 			klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, pvcName), "err", err)
 			continue
 		}
 		if pvc != nil {
-			referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+			referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
 			if err != nil {
 				klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err)
 				continue
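Both API reads in hasHostMountPVC now use the caller's ctx instead of context.TODO(). A hedged, runnable sketch of the same two-step lookup against client-go's fake clientset; the function below is a simplified stand-in that checks a single named PVC rather than walking pod volumes:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// hasHostMountPVC mirrors the diff's shape: PVC lookup, then the PV it
// names, both carrying the caller's ctx.
func hasHostMountPVC(ctx context.Context, client kubernetes.Interface, namespace, pvcName string) bool {
	pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
	if err != nil {
		return false
	}
	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return false
	}
	return pv.Spec.HostPath != nil
}

func main() {
	client := fake.NewSimpleClientset(
		&v1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "ns"},
			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "pv-1"},
		},
		&v1.PersistentVolume{
			ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
				},
			},
		},
	)
	fmt.Println(hasHostMountPVC(context.Background(), client, "ns", "data"))
}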

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kubelet
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
@@ -298,6 +299,7 @@ fd00::6 podFoo.domainFoo podFoo
 }
 
 func TestRunInContainerNoSuchPod(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -308,6 +310,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
 	podNamespace := "nsFoo"
 	containerName := "containerFoo"
 	output, err := kubelet.RunInContainer(
+		ctx,
 		kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
 		"",
 		containerName,
@@ -317,6 +320,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
 }
 
 func TestRunInContainer(t *testing.T) {
+	ctx := context.Background()
 	for _, testError := range []error{nil, errors.New("bar")} {
 		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 		defer testKubelet.Cleanup()
@@ -342,7 +346,7 @@ func TestRunInContainer(t *testing.T) {
 			}},
 		}
 		cmd := []string{"ls"}
-		actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
+		actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd)
 		assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
 		assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
 		// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
@@ -2962,6 +2966,7 @@ func TestGetExec(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.description, func(t *testing.T) {
+			ctx := context.Background()
 			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 			defer testKubelet.Cleanup()
 			kubelet := testKubelet.kubelet
@@ -2983,7 +2988,7 @@ func TestGetExec(t *testing.T) {
 			kubelet.containerRuntime = fakeRuntime
 			kubelet.streamingRuntime = fakeRuntime
 
-			redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
+			redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
 			if tc.expectError {
 				assert.Error(t, err, description)
 			} else {
@@ -3016,6 +3021,7 @@ func TestGetPortForward(t *testing.T) {
 	}}
 
 	for _, tc := range testcases {
+		ctx := context.Background()
 		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 		defer testKubelet.Cleanup()
 		kubelet := testKubelet.kubelet
@@ -3037,7 +3043,7 @@ func TestGetPortForward(t *testing.T) {
 		kubelet.containerRuntime = fakeRuntime
 		kubelet.streamingRuntime = fakeRuntime
 
-		redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{})
+		redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{})
 		if tc.expectError {
 			assert.Error(t, err, description)
 		} else {
@@ -3086,6 +3092,7 @@ func TestHasHostMountPVC(t *testing.T) {
 	}
 
 	run := func(t *testing.T, v testcase) {
+		ctx := context.Background()
 		testKubelet := newTestKubelet(t, false)
 		defer testKubelet.Cleanup()
 		pod := &v1.Pod{
@@ -3134,7 +3141,7 @@ func TestHasHostMountPVC(t *testing.T) {
 			return true, volumeToReturn, v.pvError
 		})
 
-		actual := testKubelet.kubelet.hasHostMountPVC(pod)
+		actual := testKubelet.kubelet.hasHostMountPVC(ctx, pod)
 		if actual != v.expected {
 			t.Errorf("expected %t but got %t", v.expected, actual)
 		}

View File

@@ -109,7 +109,7 @@ type fakeImageGCManager struct {
 }
 
 func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
-	return f.fakeImageService.ListImages()
+	return f.fakeImageService.ListImages(context.Background())
}
 
 type TestKubelet struct {
@@ -408,6 +408,7 @@ func newTestPods(count int) []*v1.Pod {
 }
 
 func TestSyncLoopAbort(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -420,11 +421,11 @@ func TestSyncLoopAbort(t *testing.T) {
 	close(ch)
 
 	// sanity check (also prevent this test from hanging in the next step)
-	ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
+	ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
 	require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")
 
 	// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
-	kubelet.syncLoop(ch, kubelet)
+	kubelet.syncLoop(ctx, ch, kubelet)
 }
 
 func TestSyncPodsStartPod(t *testing.T) {
@@ -445,6 +446,7 @@ func TestSyncPodsStartPod(t *testing.T) {
 }
 
 func TestHandlePodCleanupsPerQOS(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -472,7 +474,7 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
 	// within a goroutine so a two second delay should be enough time to
 	// mark the pod as killed (within this test case).
 
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
 
 	// assert that unwanted pods were killed
 	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@@ -483,9 +485,9 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
 	// simulate Runtime.KillPod
 	fakeRuntime.PodList = nil
 
-	kubelet.HandlePodCleanups()
-	kubelet.HandlePodCleanups()
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
+	kubelet.HandlePodCleanups(ctx)
+	kubelet.HandlePodCleanups(ctx)
 
 	destroyCount := 0
 	err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
@@ -642,6 +644,7 @@ func TestDispatchWorkOfActivePod(t *testing.T) {
 }
 
 func TestHandlePodCleanups(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -660,7 +663,7 @@ func TestHandlePodCleanups(t *testing.T) {
 	}
 	kubelet := testKubelet.kubelet
 
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
 
 	// assert that unwanted pods were queued to kill
 	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@@ -1131,6 +1134,7 @@ func TestHandlePluginResources(t *testing.T) {
 
 // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
 func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -1147,7 +1151,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 	}
 	// Sync with empty pods so that the entry in status map will be removed.
 	kl.podManager.SetPods([]*v1.Pod{})
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
 		t.Fatalf("expected to not have status cached for pod2")
 	}
@@ -1377,6 +1381,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 }
 
 func TestDeleteOrphanedMirrorPods(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -1426,7 +1431,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
 	}
 
 	// Sync with an empty pod list to delete all mirror pods.
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
 	for i, pod := range orphanPods {
 		name := kubecontainer.GetPodFullName(pod)
@@ -1445,6 +1450,7 @@
 }
 
 func TestGetContainerInfoForMirrorPods(t *testing.T) {
+	ctx := context.Background()
 	// pods contain one static and one mirror pod with the same name but
 	// different UIDs.
 	pods := []*v1.Pod{
@@ -1503,7 +1509,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
 	kubelet.podManager.SetPods(pods)
 
 	// Use the mirror pod UID to retrieve the stats.
-	stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
+	stats, err := kubelet.GetContainerInfo(ctx, "qux_ns", "5678", "foo", cadvisorReq)
 	assert.NoError(t, err)
 	require.NotNil(t, stats)
 }
@@ -1664,11 +1670,13 @@ func TestCheckpointContainer(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			ctx := context.Background()
 			options := &runtimeapi.CheckpointContainerRequest{}
 			if test.checkpointLocation != "" {
 				options.Location = test.checkpointLocation
 			}
 			status := kubelet.CheckpointContainer(
+				ctx,
 				fakePod.Pod.ID,
 				fmt.Sprintf(
 					"%s_%s",
@@ -1816,6 +1824,7 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
 }
 
 func TestDeletePodDirsForDeletedPods(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kl := testKubelet.kubelet
@@ -1833,18 +1842,19 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
 	// Pod 1 has been deleted and no longer exists.
 	kl.podManager.SetPods([]*v1.Pod{pods[0]})
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
 	assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
 }
 
 func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
+	ctx := context.Background()
 	t.Helper()
 	kl := testKubelet.kubelet
 
 	kl.podManager.SetPods(pods)
 	kl.HandlePodSyncs(pods)
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	for i, pod := range podsToCheck {
 		exist := dirExists(kl.getPodDir(pod.UID))
 		assert.Equal(t, shouldExist, exist, "directory of pod %d", i)

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"net/http"
 	"time"
@@ -83,6 +84,7 @@ func (f *fakePodStateProvider) ShouldPodContentBeRemoved(uid types.UID) bool {
 }
 
 func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
+	ctx := context.Background()
 	recorder := &record.FakeRecorder{}
 	logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2)
 	if err != nil {
@@ -107,7 +109,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 		memoryThrottlingFactor: 0.8,
 	}
 
-	typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
+	typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"fmt"
 	"path/filepath"
 	"strconv"
@@ -119,8 +120,8 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSand
 
 // getImageUser gets uid or user name that will run the command(s) from image. The function
 // guarantees that only one of them is set.
-func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) {
-	resp, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false)
+func (m *kubeGenericRuntimeManager) getImageUser(ctx context.Context, image string) (*int64, string, error) {
+	resp, err := m.imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: image}, false)
 	if err != nil {
 		return nil, "", err
 	}
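getImageUser now threads the caller's ctx into the image service's ImageStatus call. A sketch under simplified, illustrative CRI-ish types (ImageSpec, Image, and imageService below are stand-ins, not the runtimeapi types):

package main

import (
	"context"
	"fmt"
)

type ImageSpec struct{ Image string }

type Image struct {
	Username string
	UID      *int64
}

type imageService interface {
	ImageStatus(ctx context.Context, spec *ImageSpec, verbose bool) (*Image, error)
}

// getImageUser mirrors the diff: ctx in, forwarded to ImageStatus, and
// only one of uid/username is returned.
func getImageUser(ctx context.Context, svc imageService, image string) (*int64, string, error) {
	img, err := svc.ImageStatus(ctx, &ImageSpec{Image: image}, false)
	if err != nil {
		return nil, "", err
	}
	if img.UID != nil {
		return img.UID, "", nil
	}
	return nil, img.Username, nil
}

type fakeImages struct{}

func (fakeImages) ImageStatus(ctx context.Context, spec *ImageSpec, verbose bool) (*Image, error) {
	return &Image{Username: "nobody"}, nil
}

func main() {
	_, user, _ := getImageUser(context.Background(), fakeImages{}, "busybox")
	fmt.Println(user)
}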

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -31,7 +32,7 @@ import (
 type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
 
-func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
+func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
 	return f(uid, name, namespace)
 }
@@ -217,10 +218,11 @@ func TestGetImageUser(t *testing.T) {
 	i.SetFakeImages([]string{"test-image-ref1", "test-image-ref2", "test-image-ref3"})
 	for j, test := range tests {
+		ctx := context.Background()
 		i.Images[test.originalImage.name].Username = test.originalImage.username
 		i.Images[test.originalImage.name].Uid = test.originalImage.uid
 
-		uid, username, err := m.getImageUser(test.originalImage.name)
+		uid, username, err := m.getImageUser(ctx, test.originalImage.name)
 		assert.NoError(t, err, "TestCase[%d]", j)
 
 		if test.expectedImageUserValues.uid == (*int64)(nil) {

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"time"
 
 	internalapi "k8s.io/cri-api/pkg/apis"
@@ -59,130 +60,130 @@ func recordError(operation string, err error) {
 	}
 }
 
-func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
+func (in instrumentedRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) {
 	const operation = "version"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.Version(apiVersion)
+	out, err := in.service.Version(ctx, apiVersion)
 	recordError(operation, err)
 	return out, err
 }
 
-func (in instrumentedRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
+func (in instrumentedRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
 	const operation = "status"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.Status(verbose)
+	out, err := in.service.Status(ctx, verbose)
 	recordError(operation, err)
 	return out, err
 }
 
-func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+func (in instrumentedRuntimeService) CreateContainer(ctx context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
 	const operation = "create_container"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.CreateContainer(podSandboxID, config, sandboxConfig)
+	out, err := in.service.CreateContainer(ctx, podSandboxID, config, sandboxConfig)
 	recordError(operation, err)
 	return out, err
 }
 
-func (in instrumentedRuntimeService) StartContainer(containerID string) error {
+func (in instrumentedRuntimeService) StartContainer(ctx context.Context, containerID string) error {
 	const operation = "start_container"
 	defer recordOperation(operation, time.Now())
 
-	err := in.service.StartContainer(containerID)
+	err := in.service.StartContainer(ctx, containerID)
 	recordError(operation, err)
 	return err
 }
 
-func (in instrumentedRuntimeService) StopContainer(containerID string, timeout int64) error {
+func (in instrumentedRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) error {
 	const operation = "stop_container"
 	defer recordOperation(operation, time.Now())
 
-	err := in.service.StopContainer(containerID, timeout)
+	err := in.service.StopContainer(ctx, containerID, timeout)
 	recordError(operation, err)
 	return err
 }
 
-func (in instrumentedRuntimeService) RemoveContainer(containerID string) error {
+func (in instrumentedRuntimeService) RemoveContainer(ctx context.Context, containerID string) error {
 	const operation = "remove_container"
 	defer recordOperation(operation, time.Now())
 
-	err := in.service.RemoveContainer(containerID)
+	err := in.service.RemoveContainer(ctx, containerID)
 	recordError(operation, err)
 	return err
 }
 
-func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
+func (in instrumentedRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
 	const operation = "list_containers"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.ListContainers(filter)
+	out, err := in.service.ListContainers(ctx, filter)
 	recordError(operation, err)
 	return out, err
 }
 
-func (in instrumentedRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
+func (in instrumentedRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
 	const operation = "container_status"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.ContainerStatus(containerID, verbose)
+	out, err := in.service.ContainerStatus(ctx, containerID, verbose)
 	recordError(operation, err)
 	return out, err
 }
 
-func (in instrumentedRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) error {
+func (in instrumentedRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) error {
 	const operation = "update_container"
 	defer recordOperation(operation, time.Now())
 
-	err := in.service.UpdateContainerResources(containerID, resources)
+	err := in.service.UpdateContainerResources(ctx, containerID, resources)
 	recordError(operation, err)
 	return err
 }
 
-func (in instrumentedRuntimeService) ReopenContainerLog(containerID string) error {
+func (in instrumentedRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) error {
 	const operation = "reopen_container_log"
 	defer recordOperation(operation, time.Now())
 
-	err := in.service.ReopenContainerLog(containerID)
+	err := in.service.ReopenContainerLog(ctx, containerID)
 	recordError(operation, err)
 	return err
 }
 
-func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
+func (in instrumentedRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
 	const operation = "exec_sync"
 	defer recordOperation(operation, time.Now())
 
-	stdout, stderr, err := in.service.ExecSync(containerID, cmd, timeout)
+	stdout, stderr, err := in.service.ExecSync(ctx, containerID, cmd, timeout)
 	recordError(operation, err)
 	return stdout, stderr, err
 }
 
-func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
+func (in instrumentedRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
 	const operation = "exec"
 	defer recordOperation(operation, time.Now())
resp, err := in.service.Exec(req) resp, err := in.service.Exec(ctx, req)
recordError(operation, err) recordError(operation, err)
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { func (in instrumentedRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach" const operation = "attach"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
resp, err := in.service.Attach(req) resp, err := in.service.Attach(ctx, req)
recordError(operation, err) recordError(operation, err)
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { func (in instrumentedRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
const operation = "run_podsandbox" const operation = "run_podsandbox"
startTime := time.Now() startTime := time.Now()
defer recordOperation(operation, startTime) defer recordOperation(operation, startTime)
defer metrics.RunPodSandboxDuration.WithLabelValues(runtimeHandler).Observe(metrics.SinceInSeconds(startTime)) defer metrics.RunPodSandboxDuration.WithLabelValues(runtimeHandler).Observe(metrics.SinceInSeconds(startTime))
out, err := in.service.RunPodSandbox(config, runtimeHandler) out, err := in.service.RunPodSandbox(ctx, config, runtimeHandler)
recordError(operation, err) recordError(operation, err)
if err != nil { if err != nil {
metrics.RunPodSandboxErrors.WithLabelValues(runtimeHandler).Inc() metrics.RunPodSandboxErrors.WithLabelValues(runtimeHandler).Inc()
@ -190,146 +191,146 @@ func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandbox
return out, err return out, err
} }
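
RunPodSandbox is the one method above that additionally records a per-runtime-handler duration histogram and error counter. A hedged sketch of that metrics shape built directly on prometheus/client_golang; the metric names and the observeRun helper are assumptions, since the real code goes through the kubelet metrics package:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	runDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "run_podsandbox_duration_seconds",
			Help: "Duration of RunPodSandbox calls, labeled by runtime handler.",
		},
		[]string{"runtime_handler"},
	)
	runErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "run_podsandbox_errors_total",
			Help: "RunPodSandbox failures, labeled by runtime handler.",
		},
		[]string{"runtime_handler"},
	)
)

// observeRun mirrors the two deferred recordings above: always observe the
// duration; bump the error counter only on failure.
func observeRun(handler string, start time.Time, err error) {
	runDuration.WithLabelValues(handler).Observe(time.Since(start).Seconds())
	if err != nil {
		runErrors.WithLabelValues(handler).Inc()
	}
}

func main() {
	prometheus.MustRegister(runDuration, runErrors)
	start := time.Now()
	observeRun("runc", start, errors.New("sandbox failed"))
	fmt.Println("recorded one failed RunPodSandbox for handler runc")
}
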
func (in instrumentedRuntimeService) StopPodSandbox(podSandboxID string) error { func (in instrumentedRuntimeService) StopPodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "stop_podsandbox" const operation = "stop_podsandbox"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
err := in.service.StopPodSandbox(podSandboxID) err := in.service.StopPodSandbox(ctx, podSandboxID)
recordError(operation, err) recordError(operation, err)
return err return err
} }
func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error { func (in instrumentedRuntimeService) RemovePodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "remove_podsandbox" const operation = "remove_podsandbox"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
err := in.service.RemovePodSandbox(podSandboxID) err := in.service.RemovePodSandbox(ctx, podSandboxID)
recordError(operation, err) recordError(operation, err)
return err return err
} }
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { func (in instrumentedRuntimeService) PodSandboxStatus(ctx context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
const operation = "podsandbox_status" const operation = "podsandbox_status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStatus(podSandboxID, verbose) out, err := in.service.PodSandboxStatus(ctx, podSandboxID, verbose)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { func (in instrumentedRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox" const operation = "list_podsandbox"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandbox(filter) out, err := in.service.ListPodSandbox(ctx, filter)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { func (in instrumentedRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {
const operation = "container_stats" const operation = "container_stats"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ContainerStats(containerID) out, err := in.service.ContainerStats(ctx, containerID)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { func (in instrumentedRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
const operation = "list_container_stats" const operation = "list_container_stats"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ListContainerStats(filter) out, err := in.service.ListContainerStats(ctx, filter)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { func (in instrumentedRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
const operation = "podsandbox_stats" const operation = "podsandbox_stats"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStats(podSandboxID) out, err := in.service.PodSandboxStats(ctx, podSandboxID)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { func (in instrumentedRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
const operation = "list_podsandbox_stats" const operation = "list_podsandbox_stats"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandboxStats(filter) out, err := in.service.ListPodSandboxStats(ctx, filter)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { func (in instrumentedRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward" const operation = "port_forward"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
resp, err := in.service.PortForward(req) resp, err := in.service.PortForward(ctx, req)
recordError(operation, err) recordError(operation, err)
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error { func (in instrumentedRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config" const operation = "update_runtime_config"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
err := in.service.UpdateRuntimeConfig(runtimeConfig) err := in.service.UpdateRuntimeConfig(ctx, runtimeConfig)
recordError(operation, err) recordError(operation, err)
return err return err
} }
func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { func (in instrumentedImageManagerService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images" const operation = "list_images"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ListImages(filter) out, err := in.service.ListImages(ctx, filter)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { func (in instrumentedImageManagerService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
const operation = "image_status" const operation = "image_status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
out, err := in.service.ImageStatus(image, verbose) out, err := in.service.ImageStatus(ctx, image, verbose)
recordError(operation, err) recordError(operation, err)
return out, err return out, err
} }
func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { func (in instrumentedImageManagerService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "pull_image" const operation = "pull_image"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
imageRef, err := in.service.PullImage(image, auth, podSandboxConfig) imageRef, err := in.service.PullImage(ctx, image, auth, podSandboxConfig)
recordError(operation, err) recordError(operation, err)
return imageRef, err return imageRef, err
} }
func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error { func (in instrumentedImageManagerService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error {
const operation = "remove_image" const operation = "remove_image"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
err := in.service.RemoveImage(image) err := in.service.RemoveImage(ctx, image)
recordError(operation, err) recordError(operation, err)
return err return err
} }
func (in instrumentedImageManagerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) {
const operation = "image_fs_info" const operation = "image_fs_info"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
fsInfo, err := in.service.ImageFsInfo() fsInfo, err := in.service.ImageFsInfo(ctx)
recordError(operation, err) recordError(operation, err)
return fsInfo, err return fsInfo, err
} }
func (in instrumentedRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { func (in instrumentedRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
const operation = "checkpoint_container" const operation = "checkpoint_container"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
err := in.service.CheckpointContainer(options) err := in.service.CheckpointContainer(ctx, options)
recordError(operation, err) recordError(operation, err)
return err return err
} }
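
Call sites that do not yet receive a context have to root one themselves before reaching these wrappers. A small sketch, assuming a hypothetical ctx-aware CRI call named stopContainer, of bridging such an entry point with context.Background() and bounding the RPC with a per-call deadline:

package main

import (
	"context"
	"fmt"
	"time"
)

// stopContainer stands in for a ctx-aware CRI call; it honors cancellation.
func stopContainer(ctx context.Context, id string) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend RPC latency
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// An entry point with no caller-supplied context roots one here...
	ctx := context.Background()
	// ...and a per-call deadline can then bound the RPC.
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	fmt.Println(stopContainer(ctx, "abc123"))
}
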



@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"net" "net"
"net/http" "net/http"
"testing" "testing"
@ -70,14 +71,16 @@ func TestRecordOperation(t *testing.T) {
} }
func TestInstrumentedVersion(t *testing.T) { func TestInstrumentedVersion(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, _, _ := createTestRuntimeManager() fakeRuntime, _, _, _ := createTestRuntimeManager()
irs := newInstrumentedRuntimeService(fakeRuntime) irs := newInstrumentedRuntimeService(fakeRuntime)
vr, err := irs.Version("1") vr, err := irs.Version(ctx, "1")
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, vr.Version) assert.Equal(t, kubeRuntimeAPIVersion, vr.Version)
} }
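
The test-side migration is mechanical: each test mints its own root context and passes it through. A hedged sketch of that shape with a stand-in fakeService, not the real createTestRuntimeManager fakes:

package demo

import (
	"context"
	"testing"
)

type fakeService struct{ version string }

func (f fakeService) Version(ctx context.Context, _ string) (string, error) {
	return f.version, ctx.Err()
}

func TestVersion(t *testing.T) {
	ctx := context.Background() // each test mints its own root context
	got, err := fakeService{version: "0.1.0"}.Version(ctx, "1")
	if err != nil || got != "0.1.0" {
		t.Fatalf("Version() = %q, %v; want %q, nil", got, err, "0.1.0")
	}
}
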
func TestStatus(t *testing.T) { func TestStatus(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, _, _ := createTestRuntimeManager() fakeRuntime, _, _, _ := createTestRuntimeManager()
fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{ fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{ Conditions: []*runtimeapi.RuntimeCondition{
@ -86,7 +89,7 @@ func TestStatus(t *testing.T) {
}, },
} }
irs := newInstrumentedRuntimeService(fakeRuntime) irs := newInstrumentedRuntimeService(fakeRuntime)
actual, err := irs.Status(false) actual, err := irs.Status(ctx, false)
assert.NoError(t, err) assert.NoError(t, err)
expected := &runtimeapi.RuntimeStatus{ expected := &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{ Conditions: []*runtimeapi.RuntimeCondition{


@ -170,11 +170,11 @@ func calcRestartCountByLogDir(path string) (int, error) {
// * create the container // * create the container
// * start the container // * start the container
// * run the post start lifecycle hooks (if applicable) // * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
container := spec.container container := spec.container
// Step 1: pull the image. // Step 1: pull the image.
imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig) imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, pod, container, pullSecrets, podSandboxConfig)
if err != nil { if err != nil {
s, _ := grpcstatus.FromError(err) s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message()) m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
@ -212,7 +212,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
return s.Message(), ErrCreateContainerConfig return s.Message(), ErrCreateContainerConfig
} }
containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs, target) containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target)
if cleanupAction != nil { if cleanupAction != nil {
defer cleanupAction() defer cleanupAction()
} }
@ -229,7 +229,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
return s.Message(), ErrPreCreateHook return s.Message(), ErrPreCreateHook
} }
containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig) containerID, err := m.runtimeService.CreateContainer(ctx, podSandboxID, containerConfig, podSandboxConfig)
if err != nil { if err != nil {
s, _ := grpcstatus.FromError(err) s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message()) m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
@ -244,7 +244,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name)) m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))
// Step 3: start the container. // Step 3: start the container.
err = m.runtimeService.StartContainer(containerID) err = m.runtimeService.StartContainer(ctx, containerID)
if err != nil { if err != nil {
s, _ := grpcstatus.FromError(err) s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message()) m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
@ -277,13 +277,13 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
Type: m.runtimeName, Type: m.runtimeName,
ID: containerID, ID: containerID,
} }
msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart) msg, handlerErr := m.runner.Run(ctx, kubeContainerID, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil { if handlerErr != nil {
klog.ErrorS(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod), klog.ErrorS(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String()) "podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
// do not record the message in the event so that secrets won't leak from the server. // do not record the message in the event so that secrets won't leak from the server.
m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed") m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed")
if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil { if err := m.killContainer(ctx, pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil {
klog.ErrorS(err, "Failed to kill container", "pod", klog.KObj(pod), klog.ErrorS(err, "Failed to kill container", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String()) "podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
} }
@ -295,13 +295,13 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
} }
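
startContainer now threads a single ctx through image pull, config generation, creation, start, and the PostStart hook, reusing the same ctx for the kill on a failed hook. A compact sketch of that plumbing shape; the runtime type and all helpers are hypothetical stand-ins:

package demo

import "context"

type runtime struct{}

func (r runtime) pull(ctx context.Context, image string) (string, error)  { return "ref", ctx.Err() }
func (r runtime) create(ctx context.Context, ref string) (string, error) { return "cid", ctx.Err() }
func (r runtime) start(ctx context.Context, id string) error             { return ctx.Err() }
func (r runtime) postStart(ctx context.Context, id string) error         { return ctx.Err() }
func (r runtime) kill(ctx context.Context, id string) error              { return ctx.Err() }

func (r runtime) startContainer(ctx context.Context, image string) (string, error) {
	ref, err := r.pull(ctx, image)
	if err != nil {
		return "", err
	}
	id, err := r.create(ctx, ref)
	if err != nil {
		return "", err
	}
	if err := r.start(ctx, id); err != nil {
		return "", err
	}
	if err := r.postStart(ctx, id); err != nil {
		// The same ctx is reused for cleanup, as in the PostStart failure path.
		_ = r.kill(ctx, id)
		return "", err
	}
	return id, nil
}
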
// generateContainerConfig generates container config for kubelet runtime v1. // generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) { func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs) opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
uid, username, err := m.getImageUser(container.Image) uid, username, err := m.getImageUser(ctx, container.Image)
if err != nil { if err != nil {
return nil, cleanupAction, err return nil, cleanupAction, err
} }
@ -432,7 +432,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
// getKubeletContainers lists containers managed by kubelet. // getKubeletContainers lists containers managed by kubelet.
// The boolean parameter specifies whether returns all containers including // The boolean parameter specifies whether returns all containers including
// those already exited and dead containers (used for garbage collection). // those already exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) { func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, allContainers bool) ([]*runtimeapi.Container, error) {
filter := &runtimeapi.ContainerFilter{} filter := &runtimeapi.ContainerFilter{}
if !allContainers { if !allContainers {
filter.State = &runtimeapi.ContainerStateValue{ filter.State = &runtimeapi.ContainerStateValue{
@ -440,7 +440,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
} }
} }
containers, err := m.runtimeService.ListContainers(filter) containers, err := m.runtimeService.ListContainers(ctx, filter)
if err != nil { if err != nil {
klog.ErrorS(err, "ListContainers failed") klog.ErrorS(err, "ListContainers failed")
return nil, err return nil, err
@ -491,9 +491,9 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string)
} }
// getPodContainerStatuses gets all containers' statuses for the pod. // getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) { func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
// Select all containers of the given pod. // Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{ containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
}) })
if err != nil { if err != nil {
@ -504,7 +504,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
statuses := []*kubecontainer.Status{} statuses := []*kubecontainer.Status{}
// TODO: optimization: set maximum number of containers per container name to examine. // TODO: optimization: set maximum number of containers per container name to examine.
for _, c := range containers { for _, c := range containers {
resp, err := m.runtimeService.ContainerStatus(c.Id, false) resp, err := m.runtimeService.ContainerStatus(ctx, c.Id, false)
// Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal. // Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal.
// The previous call (ListContainers) never fails due to a pod container not existing. // The previous call (ListContainers) never fails due to a pod container not existing.
// Therefore, this method should not either, but instead act as if the previous call failed, // Therefore, this method should not either, but instead act as if the previous call failed,
@ -579,7 +579,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin
} }
// executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
klog.V(3).InfoS("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String()) klog.V(3).InfoS("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String())
start := metav1.Now() start := metav1.Now()
@ -587,7 +587,7 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
go func() { go func() {
defer close(done) defer close(done)
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
if _, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID, klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String()) "containerName", containerSpec.Name, "containerID", containerID.String())
// do not record the message in the event so that secrets won't leak from the server. // do not record the message in the event so that secrets won't leak from the server.
@ -615,10 +615,10 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
// TODO(random-liu): Add a node e2e test to test this behaviour. // TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can // TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can
// just pass the needed function not create the fake object. // just pass the needed function not create the fake object.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) { func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.Context, containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
var pod *v1.Pod var pod *v1.Pod
var container *v1.Container var container *v1.Container
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false) resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -658,7 +658,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
// killContainer kills a container through the following steps: // killContainer kills a container through the following steps:
// * Run the pre-stop lifecycle hooks (if applicable). // * Run the pre-stop lifecycle hooks (if applicable).
// * Stop the container. // * Stop the container.
func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error { func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error {
var containerSpec *v1.Container var containerSpec *v1.Container
if pod != nil { if pod != nil {
if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil { if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil {
@ -667,7 +667,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
} }
} else { } else {
// Restore necessary information if one of the specs is nil. // Restore necessary information if one of the specs is nil.
restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(containerID) restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(ctx, containerID)
if err != nil { if err != nil {
return err return err
} }
@ -689,7 +689,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
// Run the pre-stop lifecycle hooks if applicable and if there is enough time to run it // Run the pre-stop lifecycle hooks if applicable and if there is enough time to run it
if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 { if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 {
gracePeriod = gracePeriod - m.executePreStopHook(pod, containerID, containerSpec, gracePeriod) gracePeriod = gracePeriod - m.executePreStopHook(ctx, pod, containerID, containerSpec, gracePeriod)
} }
// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs // always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
if gracePeriod < minimumGracePeriodInSeconds { if gracePeriod < minimumGracePeriodInSeconds {
@ -704,7 +704,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
klog.V(2).InfoS("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID, klog.V(2).InfoS("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
err := m.runtimeService.StopContainer(containerID.ID, gracePeriod) err := m.runtimeService.StopContainer(ctx, containerID.ID, gracePeriod)
if err != nil && !crierror.IsNotFound(err) { if err != nil && !crierror.IsNotFound(err) {
klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID, klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
@ -717,7 +717,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
} }
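
The grace-period accounting above subtracts the time the preStop hook consumed and clamps the remainder to a minimal shutdown window to avoid unnecessary SIGKILLs. A sketch of that arithmetic; the constant mirrors the kubelet's minimumGracePeriodInSeconds but should be read as an assumption here:

package demo

const minimumGracePeriodInSeconds = 2 // assumed value of the kubelet constant

// remainingGrace returns the grace period left for StopContainer after the
// preStop hook ran, never dropping below the minimal shutdown window.
func remainingGrace(gracePeriod, preStopSeconds int64) int64 {
	gracePeriod -= preStopSeconds
	if gracePeriod < minimumGracePeriodInSeconds {
		gracePeriod = minimumGracePeriodInSeconds
	}
	return gracePeriod
}
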
// killContainersWithSyncResult kills all pod's containers with sync results. // killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) { func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers)) containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
@ -728,7 +728,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
defer wg.Done() defer wg.Done()
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
if err := m.killContainer(pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil { if err := m.killContainer(ctx, pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
// Use runningPod for logging as the pod passed in could be *nil*. // Use runningPod for logging as the pod passed in could be *nil*.
klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID, klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
@ -750,7 +750,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
// containers, we have reduced the number of outstanding init containers still // containers, we have reduced the number of outstanding init containers still
// present. This reduces load on the container garbage collector by only // present. This reduces load on the container garbage collector by only
// preserving the most recent terminated init container. // preserving the most recent terminated init container.
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus) { func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
// only the last execution of each init container should be preserved, and only preserve it if it is in the // only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep. // list of init containers to keep.
initContainerNames := sets.NewString() initContainerNames := sets.NewString()
@ -775,7 +775,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
} }
// prune all other init containers that match this container name // prune all other init containers that match this container name
klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count) klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(status.ID.ID); err != nil { if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue continue
} }
@ -786,7 +786,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
// Remove all init containers. Note that this function does not check the state // Remove all init containers. Note that this function does not check the state
// of the container because it assumes all init containers have been stopped // of the container because it assumes all init containers have been stopped
// before the call happens. // before the call happens.
func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *kubecontainer.PodStatus) { func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
initContainerNames := sets.NewString() initContainerNames := sets.NewString()
for _, container := range pod.Spec.InitContainers { for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name) initContainerNames.Insert(container.Name)
@ -800,7 +800,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
count++ count++
// Purge all init containers that match this container name // Purge all init containers that match this container name
klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count) klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(status.ID.ID); err != nil { if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue continue
} }
@ -867,7 +867,7 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus)
// GetContainerLogs returns logs of a specific container. // GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false) resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil { if err != nil {
klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err) klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err)
return fmt.Errorf("unable to retrieve container logs for %v", containerID.String()) return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
@ -880,7 +880,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v
} }
// GetExec gets the endpoint the runtime will serve the exec request from. // GetExec gets the endpoint the runtime will serve the exec request from.
func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.ExecRequest{ req := &runtimeapi.ExecRequest{
ContainerId: id.ID, ContainerId: id.ID,
Cmd: cmd, Cmd: cmd,
@ -889,7 +889,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
Stdout: stdout, Stdout: stdout,
Stderr: stderr, Stderr: stderr,
} }
resp, err := m.runtimeService.Exec(req) resp, err := m.runtimeService.Exec(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -898,7 +898,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
} }
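
GetExec builds a CRI ExecRequest and hands back the URL at which the runtime will serve the stream. A simplified sketch of that request/endpoint shape; ExecRequest, execer, and getExec below are stand-ins, not the CRI types:

package demo

import (
	"context"
	"net/url"
)

// ExecRequest is a simplified stand-in for the CRI request type.
type ExecRequest struct {
	ContainerID string
	Cmd         []string
	Stdin       bool
	Stdout      bool
	Stderr      bool
	Tty         bool
}

// execer answers with an endpoint URL that the kubelet later streams through.
type execer interface {
	Exec(ctx context.Context, req *ExecRequest) (string, error)
}

func getExec(ctx context.Context, r execer, id string, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
	resp, err := r.Exec(ctx, &ExecRequest{
		ContainerID: id,
		Cmd:         cmd,
		Stdin:       stdin,
		Stdout:      stdout,
		Stderr:      stderr,
		Tty:         tty,
	})
	if err != nil {
		return nil, err
	}
	return url.Parse(resp)
}
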
// GetAttach gets the endpoint the runtime will serve the attach request from. // GetAttach gets the endpoint the runtime will serve the attach request from.
func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.AttachRequest{ req := &runtimeapi.AttachRequest{
ContainerId: id.ID, ContainerId: id.ID,
Stdin: stdin, Stdin: stdin,
@ -906,7 +906,7 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi
Stderr: stderr, Stderr: stderr,
Tty: tty, Tty: tty,
} }
resp, err := m.runtimeService.Attach(req) resp, err := m.runtimeService.Attach(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -914,8 +914,8 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi
} }
// RunInContainer synchronously executes the command in the container, and returns the output. // RunInContainer synchronously executes the command in the container, and returns the output.
func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
stdout, stderr, err := m.runtimeService.ExecSync(id.ID, cmd, timeout) stdout, stderr, err := m.runtimeService.ExecSync(ctx, id.ID, cmd, timeout)
// NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient // NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient
// for logging purposes. A combined output option will need to be added to the ExecSyncRequest // for logging purposes. A combined output option will need to be added to the ExecSyncRequest
// if more precise output ordering is ever required. // if more precise output ordering is ever required.
@ -928,7 +928,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID,
// that container logs to be removed with the container. // that container logs to be removed with the container.
// Notice that we assume that the container should only be removed in non-running state, and // Notice that we assume that the container should only be removed in non-running state, and
// it will not write container logs anymore in that state. // it will not write container logs anymore in that state.
func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error { func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, containerID string) error {
klog.V(4).InfoS("Removing container", "containerID", containerID) klog.V(4).InfoS("Removing container", "containerID", containerID)
// Call internal container post-stop lifecycle hook. // Call internal container post-stop lifecycle hook.
if err := m.internalLifecycle.PostStopContainer(containerID); err != nil { if err := m.internalLifecycle.PostStopContainer(containerID); err != nil {
@ -937,22 +937,22 @@ func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error {
// Remove the container log. // Remove the container log.
// TODO: Separate log and container lifecycle management. // TODO: Separate log and container lifecycle management.
if err := m.removeContainerLog(containerID); err != nil { if err := m.removeContainerLog(ctx, containerID); err != nil {
return err return err
} }
// Remove the container. // Remove the container.
return m.runtimeService.RemoveContainer(containerID) return m.runtimeService.RemoveContainer(ctx, containerID)
} }
// removeContainerLog removes the container log. // removeContainerLog removes the container log.
func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error { func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, containerID string) error {
// Use log manager to remove rotated logs. // Use log manager to remove rotated logs.
err := m.logManager.Clean(containerID) err := m.logManager.Clean(ctx, containerID)
if err != nil { if err != nil {
return err return err
} }
resp, err := m.runtimeService.ContainerStatus(containerID, false) resp, err := m.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil { if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err) return fmt.Errorf("failed to get container status %q: %v", containerID, err)
} }
@ -973,8 +973,8 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
} }
// DeleteContainer removes a container. // DeleteContainer removes a container.
func (m *kubeGenericRuntimeManager) DeleteContainer(containerID kubecontainer.ContainerID) error { func (m *kubeGenericRuntimeManager) DeleteContainer(ctx context.Context, containerID kubecontainer.ContainerID) error {
return m.removeContainer(containerID.ID) return m.removeContainer(ctx, containerID.ID)
} }
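
This commit changes every signature and call site in one sweep. An alternative staging strategy, sketched below and explicitly not what the commit does, is a deprecated forwarding wrapper that roots a context for old callers while new callers pass their own:

package demo

import "context"

type manager struct{}

func (m *manager) DeleteContainer(ctx context.Context, id string) error {
	// A real implementation would forward ctx to the runtime service here.
	return nil
}

// DeleteContainerLegacy keeps old call sites compiling during a gradual
// migration by rooting a context on their behalf.
//
// Deprecated: use DeleteContainer with a context.
func (m *manager) DeleteContainerLegacy(id string) error {
	return m.DeleteContainer(context.Background(), id)
}

Flipping everything at once, as done here, avoids the wrapper's risk of call sites silently keeping context.Background() forever.
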
// setTerminationGracePeriod determines the grace period to use when killing a container // setTerminationGracePeriod determines the grace period to use when killing a container


@ -20,6 +20,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"reflect" "reflect"
"strconv" "strconv"
"testing" "testing"
@ -39,10 +40,11 @@ import (
) )
func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig { func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig {
ctx := context.Background()
container := &pod.Spec.Containers[containerIndex] container := &pod.Spec.Containers[containerIndex]
podIP := "" podIP := ""
restartCount := 0 restartCount := 0
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, []string{podIP}) opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, []string{podIP})
containerLogsPath := buildContainerLogsPath(container.Name, restartCount) containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount) restartCountUint32 := uint32(restartCount)
envs := make([]*runtimeapi.KeyValue, len(opts.Envs)) envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
@ -73,6 +75,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
} }
func TestGenerateContainerConfig(t *testing.T) { func TestGenerateContainerConfig(t *testing.T) {
ctx := context.Background()
_, imageService, m, err := createTestRuntimeManager() _, imageService, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -102,7 +105,7 @@ func TestGenerateContainerConfig(t *testing.T) {
} }
expectedConfig := makeExpectedConfig(m, pod, 0, false) expectedConfig := makeExpectedConfig(m, pod, 0, false)
containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil) containerConfig, _, err := m.generateContainerConfig(ctx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.") assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set") assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
@ -133,11 +136,11 @@ func TestGenerateContainerConfig(t *testing.T) {
}, },
} }
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) _, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
assert.Error(t, err) assert.Error(t, err)
imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) imageID, _ := imageService.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
resp, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}, false) resp, _ := imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: imageID}, false)
resp.Image.Uid = nil resp.Image.Uid = nil
resp.Image.Username = "test" resp.Image.Username = "test"
@ -145,7 +148,7 @@ func TestGenerateContainerConfig(t *testing.T) {
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) _, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username") assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
} }


@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
@ -43,6 +44,7 @@ import (
// TestRemoveContainer tests removing the container and its corresponding container logs. // TestRemoveContainer tests removing the container and its corresponding container logs.
func TestRemoveContainer(t *testing.T) { func TestRemoveContainer(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err) require.NoError(t, err)
pod := &v1.Pod{ pod := &v1.Pod{
@ -80,7 +82,7 @@ func TestRemoveContainer(t *testing.T) {
fakeOS.Create(expectedContainerLogPath) fakeOS.Create(expectedContainerLogPath)
fakeOS.Create(expectedContainerLogPathRotated) fakeOS.Create(expectedContainerLogPathRotated)
err = m.removeContainer(containerID) err = m.removeContainer(ctx, containerID)
assert.NoError(t, err) assert.NoError(t, err)
// Verify container log is removed. // Verify container log is removed.
@ -90,7 +92,7 @@ func TestRemoveContainer(t *testing.T) {
fakeOS.Removes) fakeOS.Removes)
// Verify container is removed // Verify container is removed
assert.Contains(t, fakeRuntime.Called, "RemoveContainer") assert.Contains(t, fakeRuntime.Called, "RemoveContainer")
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID}) containers, err := fakeRuntime.ListContainers(ctx, &runtimeapi.ContainerFilter{Id: containerID})
assert.NoError(t, err) assert.NoError(t, err)
assert.Empty(t, containers) assert.Empty(t, containers)
} }
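
The test above asserts against the fake runtime's call log (assert.Contains on fakeRuntime.Called). A minimal sketch of that recording-fake style, with hypothetical names:

package demo

import (
	"context"
	"testing"
)

// fakeRuntime records the names of invoked methods so tests can assert on them.
type fakeRuntime struct {
	Called []string
}

func (f *fakeRuntime) RemoveContainer(ctx context.Context, id string) error {
	f.Called = append(f.Called, "RemoveContainer")
	return ctx.Err()
}

func TestRemoveRecordsCall(t *testing.T) {
	f := &fakeRuntime{}
	if err := f.RemoveContainer(context.Background(), "cid"); err != nil {
		t.Fatal(err)
	}
	if len(f.Called) == 0 || f.Called[0] != "RemoveContainer" {
		t.Fatalf("expected RemoveContainer to be recorded, got %v", f.Called)
	}
}
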
@ -123,7 +125,8 @@ func TestKillContainer(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
err := m.killContainer(test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride) ctx := context.Background()
err := m.killContainer(ctx, test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride)
if test.succeed != (err == nil) { if test.succeed != (err == nil) {
t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err) t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err)
} }
@ -303,8 +306,9 @@ func TestLifeCycleHook(t *testing.T) {
// Configured and works as expected // Configured and works as expected
t.Run("PreStop-CMDExec", func(t *testing.T) { t.Run("PreStop-CMDExec", func(t *testing.T) {
ctx := context.Background()
testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] { if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] {
t.Errorf("CMD Prestop hook was not invoked") t.Errorf("CMD Prestop hook was not invoked")
} }
@ -313,21 +317,23 @@ func TestLifeCycleHook(t *testing.T) {
// Configured and working HTTP hook // Configured and working HTTP hook
t.Run("PreStop-HTTPGet", func(t *testing.T) { t.Run("PreStop-HTTPGet", func(t *testing.T) {
t.Run("inconsistent", func(t *testing.T) { t.Run("inconsistent", func(t *testing.T) {
ctx := context.Background()
defer func() { fakeHTTP.req = nil }() defer func() { fakeHTTP.req = nil }()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{} httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{}
testPod.Spec.Containers[0].Lifecycle = httpLifeCycle testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) { if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
t.Errorf("HTTP Prestop hook was not invoked") t.Errorf("HTTP Prestop hook was not invoked")
} }
}) })
t.Run("consistent", func(t *testing.T) { t.Run("consistent", func(t *testing.T) {
ctx := context.Background()
defer func() { fakeHTTP.req = nil }() defer func() { fakeHTTP.req = nil }()
httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt(80) httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt(80)
testPod.Spec.Containers[0].Lifecycle = httpLifeCycle testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) { if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
t.Errorf("HTTP Prestop hook was not invoked") t.Errorf("HTTP Prestop hook was not invoked")
@ -337,12 +343,13 @@ func TestLifeCycleHook(t *testing.T) {
// When there is no time to run PreStopHook // When there is no time to run PreStopHook
t.Run("PreStop-NoTimeToRun", func(t *testing.T) { t.Run("PreStop-NoTimeToRun", func(t *testing.T) {
ctx := context.Background()
gracePeriodLocal := int64(0) gracePeriodLocal := int64(0)
testPod.DeletionGracePeriodSeconds = &gracePeriodLocal testPod.DeletionGracePeriodSeconds = &gracePeriodLocal
testPod.Spec.TerminationGracePeriodSeconds = &gracePeriodLocal testPod.Spec.TerminationGracePeriodSeconds = &gracePeriodLocal
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriodLocal) m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriodLocal)
if fakeHTTP.req != nil { if fakeHTTP.req != nil {
t.Errorf("HTTP Prestop hook Should not execute when gracePeriod is 0") t.Errorf("HTTP Prestop hook Should not execute when gracePeriod is 0")
@ -351,7 +358,7 @@ func TestLifeCycleHook(t *testing.T) {
// Post Start script // Post Start script
t.Run("PostStart-CmdExe", func(t *testing.T) { t.Run("PostStart-CmdExe", func(t *testing.T) {
ctx := context.Background()
// Fake all the things you need before trying to create a container // Fake all the things you need before trying to create a container
fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod) fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod)
fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0) fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0)
@ -372,7 +379,7 @@ func TestLifeCycleHook(t *testing.T) {
} }
// Now try to create a container, which should in turn invoke PostStart Hook // Now try to create a container, which should in turn invoke PostStart Hook
_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{}) _, err := m.startContainer(ctx, fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{})
if err != nil { if err != nil {
t.Errorf("startContainer error =%v", err) t.Errorf("startContainer error =%v", err)
} }
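The subtests above each construct a context.Background() root and pass it explicitly into killContainer and startContainer. A minimal sketch of that per-subtest pattern, with a hypothetical fakeManager and Kill method standing in for the kubelet types:

package example

import (
	"context"
	"testing"
	"time"
)

type fakeManager struct{}

// Kill stands in for a ctx-aware runtime call such as killContainer.
func (m *fakeManager) Kill(ctx context.Context, name string) error {
	// A real implementation would forward ctx into the CRI client call.
	return ctx.Err()
}

func TestKillUsesCallerContext(t *testing.T) {
	m := &fakeManager{}
	// Each subtest owns its context, as in the lifecycle-hook tests above.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := m.Kill(ctx, "foo"); err != nil {
		t.Fatalf("Kill: %v", err)
	}
}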
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@ -111,18 +112,18 @@ func (a sandboxByCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sandboxByCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) } func (a sandboxByCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
// enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit. // enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit.
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) { func (cgc *containerGC) enforceMaxContainersPerEvictUnit(ctx context.Context, evictUnits containersByEvictUnit, MaxContainers int) {
for key := range evictUnits { for key := range evictUnits {
toRemove := len(evictUnits[key]) - MaxContainers toRemove := len(evictUnits[key]) - MaxContainers
if toRemove > 0 { if toRemove > 0 {
evictUnits[key] = cgc.removeOldestN(evictUnits[key], toRemove) evictUnits[key] = cgc.removeOldestN(ctx, evictUnits[key], toRemove)
} }
} }
} }
// removeOldestN removes the oldest toRemove containers and returns the resulting slice. // removeOldestN removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo { func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containerGCInfo, toRemove int) []containerGCInfo {
// Remove from oldest to newest (last to first). // Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove numToKeep := len(containers) - toRemove
if numToKeep > 0 { if numToKeep > 0 {
@ -137,12 +138,12 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int
ID: containers[i].id, ID: containers[i].id,
} }
message := "Container is in unknown state, try killing it before removal" message := "Container is in unknown state, try killing it before removal"
if err := cgc.manager.killContainer(nil, id, containers[i].name, message, reasonUnknown, nil); err != nil { if err := cgc.manager.killContainer(ctx, nil, id, containers[i].name, message, reasonUnknown, nil); err != nil {
klog.ErrorS(err, "Failed to stop container", "containerID", containers[i].id) klog.ErrorS(err, "Failed to stop container", "containerID", containers[i].id)
continue continue
} }
} }
if err := cgc.manager.removeContainer(containers[i].id); err != nil { if err := cgc.manager.removeContainer(ctx, containers[i].id); err != nil {
klog.ErrorS(err, "Failed to remove container", "containerID", containers[i].id) klog.ErrorS(err, "Failed to remove container", "containerID", containers[i].id)
} }
} }
@ -153,7 +154,7 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int
// removeOldestNSandboxes removes the oldest inactive toRemove sandboxes and // removeOldestNSandboxes removes the oldest inactive toRemove sandboxes and
// returns the resulting slice. // returns the resulting slice.
func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemove int) { func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes []sandboxGCInfo, toRemove int) {
numToKeep := len(sandboxes) - toRemove numToKeep := len(sandboxes) - toRemove
if numToKeep > 0 { if numToKeep > 0 {
sort.Sort(sandboxByCreated(sandboxes)) sort.Sort(sandboxByCreated(sandboxes))
@ -161,30 +162,30 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo
// Remove from oldest to newest (last to first). // Remove from oldest to newest (last to first).
for i := len(sandboxes) - 1; i >= numToKeep; i-- { for i := len(sandboxes) - 1; i >= numToKeep; i-- {
if !sandboxes[i].active { if !sandboxes[i].active {
cgc.removeSandbox(sandboxes[i].id) cgc.removeSandbox(ctx, sandboxes[i].id)
} }
} }
} }
// removeSandbox removes the sandbox by sandboxID. // removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(sandboxID string) { func (cgc *containerGC) removeSandbox(ctx context.Context, sandboxID string) {
klog.V(4).InfoS("Removing sandbox", "sandboxID", sandboxID) klog.V(4).InfoS("Removing sandbox", "sandboxID", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before // In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try // GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it. // stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(sandboxID); err != nil { if err := cgc.client.StopPodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID) klog.ErrorS(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID)
return return
} }
if err := cgc.client.RemovePodSandbox(sandboxID); err != nil { if err := cgc.client.RemovePodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to remove sandbox", "sandboxID", sandboxID) klog.ErrorS(err, "Failed to remove sandbox", "sandboxID", sandboxID)
} }
} }
// evictableContainers gets all containers that are evictable. Evictable containers are: not running // evictableContainers gets all containers that are evictable. Evictable containers are: not running
// and created more than MinAge ago. // and created more than MinAge ago.
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, error) { func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Duration) (containersByEvictUnit, error) {
containers, err := cgc.manager.getKubeletContainers(true) containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil { if err != nil {
return containersByEvictUnit{}, err return containersByEvictUnit{}, err
} }
@ -220,9 +221,9 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
} }
// evict all containers that are evictable // evict all containers that are evictable
func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
// Separate containers by evict units. // Separate containers by evict units.
evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge) evictUnits, err := cgc.evictableContainers(ctx, gcPolicy.MinAge)
if err != nil { if err != nil {
return err return err
} }
@ -231,7 +232,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
if allSourcesReady { if allSourcesReady {
for key, unit := range evictUnits { for key, unit := range evictUnits {
if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) { if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) {
cgc.removeOldestN(unit, len(unit)) // Remove all. cgc.removeOldestN(ctx, unit, len(unit)) // Remove all.
delete(evictUnits, key) delete(evictUnits, key)
} }
} }
@ -239,7 +240,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
// Enforce max containers per evict unit. // Enforce max containers per evict unit.
if gcPolicy.MaxPerPodContainer >= 0 { if gcPolicy.MaxPerPodContainer >= 0 {
cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer) cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, gcPolicy.MaxPerPodContainer)
} }
// Enforce max total number of containers. // Enforce max total number of containers.
@ -249,7 +250,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
if numContainersPerEvictUnit < 1 { if numContainersPerEvictUnit < 1 {
numContainersPerEvictUnit = 1 numContainersPerEvictUnit = 1
} }
cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit) cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, numContainersPerEvictUnit)
// If we still need to evict, evict oldest first. // If we still need to evict, evict oldest first.
numContainers := evictUnits.NumContainers() numContainers := evictUnits.NumContainers()
@ -260,7 +261,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
} }
sort.Sort(byCreated(flattened)) sort.Sort(byCreated(flattened))
cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers) cgc.removeOldestN(ctx, flattened, numContainers-gcPolicy.MaxContainers)
} }
} }
return nil return nil
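As a worked example of the cap arithmetic in the hunks above (hedged: the division itself happens in lines elided from this diff), a hypothetical helper deriving the per-evict-unit limit from a global MaxContainers budget, flooring at one container per unit:

package example

// perUnitCap mirrors the eviction arithmetic sketched above: divide the
// global budget across evict units, but never allow less than one
// container per unit. Hypothetical helper, not kubelet code.
func perUnitCap(maxContainers, numEvictUnits int) int {
	if numEvictUnits == 0 {
		return maxContainers
	}
	limit := maxContainers / numEvictUnits
	if limit < 1 {
		limit = 1
	}
	return limit
}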
@ -272,13 +273,13 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
// 2. contains no containers. // 2. contains no containers.
// 3. belongs to a non-existent (i.e., already removed) pod, or is not the // 3. belongs to a non-existent (i.e., already removed) pod, or is not the
// most recently created sandbox for the pod. // most recently created sandbox for the pod.
func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods bool) error {
containers, err := cgc.manager.getKubeletContainers(true) containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil { if err != nil {
return err return err
} }
sandboxes, err := cgc.manager.getKubeletSandboxes(true) sandboxes, err := cgc.manager.getKubeletSandboxes(ctx, true)
if err != nil { if err != nil {
return err return err
} }
@ -315,10 +316,10 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {
// Remove all evictable sandboxes if the pod has been removed. // Remove all evictable sandboxes if the pod has been removed.
// Note that the latest dead sandbox is also removed if there is // Note that the latest dead sandbox is also removed if there is
// already an active one. // already an active one.
cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)) cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes))
} else { } else {
// Keep latest one if the pod still exists. // Keep latest one if the pod still exists.
cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)-1) cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes)-1)
} }
} }
return nil return nil
@ -326,7 +327,7 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories // evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods. // are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesReady bool) error {
osInterface := cgc.manager.osInterface osInterface := cgc.manager.osInterface
if allSourcesReady { if allSourcesReady {
// Only remove pod logs directories when all sources are ready. // Only remove pod logs directories when all sources are ready.
@ -354,7 +355,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
for _, logSymlink := range logSymlinks { for _, logSymlink := range logSymlinks {
if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) { if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil { if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil {
resp, err := cgc.manager.runtimeService.ContainerStatus(containerID, false) resp, err := cgc.manager.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil { if err != nil {
// TODO: we should handle container not found (i.e. container was deleted) case differently // TODO: we should handle container not found (i.e. container was deleted) case differently
// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved // once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
@ -405,20 +406,20 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
// * removes oldest dead containers by enforcing gcPolicy.MaxContainers. // * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
// * gets evictable sandboxes which are not ready and contains no containers. // * gets evictable sandboxes which are not ready and contains no containers.
// * removes evictable sandboxes. // * removes evictable sandboxes.
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { func (cgc *containerGC) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
errors := []error{} errors := []error{}
// Remove evictable containers // Remove evictable containers
if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil { if err := cgc.evictContainers(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil {
errors = append(errors, err) errors = append(errors, err)
} }
// Remove sandboxes with zero containers // Remove sandboxes with zero containers
if err := cgc.evictSandboxes(evictNonDeletedPods); err != nil { if err := cgc.evictSandboxes(ctx, evictNonDeletedPods); err != nil {
errors = append(errors, err) errors = append(errors, err)
} }
// Remove pod sandbox log directory // Remove pod sandbox log directory
if err := cgc.evictPodLogsDirectories(allSourcesReady); err != nil { if err := cgc.evictPodLogsDirectories(ctx, allSourcesReady); err != nil {
errors = append(errors, err) errors = append(errors, err)
} }
return utilerrors.NewAggregate(errors) return utilerrors.NewAggregate(errors)
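GarbageCollect now threads a single ctx through all three eviction phases and aggregates their errors. A hedged sketch of how a caller might drive such a ctx-aware collector so that shutdown cancels in-flight CRI calls; the interface below is a simplified stand-in (the gcPolicy parameter is omitted), not the kubelet's real wiring:

package example

import (
	"context"
	"log"
	"time"
)

type garbageCollector interface {
	GarbageCollect(ctx context.Context, allSourcesReady, evictNonDeletedPods bool) error
}

// runGCLoop runs periodic garbage collection until ctx is cancelled.
func runGCLoop(ctx context.Context, gc garbageCollector, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return // cancellation also aborts in-flight CRI calls sharing ctx
		case <-ticker.C:
			if err := gc.GarbageCollect(ctx, true, false); err != nil {
				// Aggregated errors are logged rather than treated as fatal.
				log.Printf("garbage collection failed: %v", err)
			}
		}
	}
}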
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -160,6 +161,7 @@ func TestSandboxGC(t *testing.T) {
}, },
} { } {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
podStateProvider.removed = make(map[types.UID]struct{}) podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{}) podStateProvider.terminated = make(map[types.UID]struct{})
fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes) fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes)
@ -175,13 +177,13 @@ func TestSandboxGC(t *testing.T) {
fakeRuntime.SetFakeSandboxes(fakeSandboxes) fakeRuntime.SetFakeSandboxes(fakeSandboxes)
fakeRuntime.SetFakeContainers(fakeContainers) fakeRuntime.SetFakeContainers(fakeContainers)
err := m.containerGC.evictSandboxes(test.evictTerminatingPods) err := m.containerGC.evictSandboxes(ctx, test.evictTerminatingPods)
assert.NoError(t, err) assert.NoError(t, err)
realRemain, err := fakeRuntime.ListPodSandbox(nil) realRemain, err := fakeRuntime.ListPodSandbox(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain)) assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain { for _, remain := range test.remain {
resp, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id, false) resp, err := fakeRuntime.PodSandboxStatus(ctx, fakeSandboxes[remain].Id, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status) assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status)
} }
@ -387,6 +389,7 @@ func TestContainerGC(t *testing.T) {
}, },
} { } {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
podStateProvider.removed = make(map[types.UID]struct{}) podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{}) podStateProvider.terminated = make(map[types.UID]struct{})
fakeContainers := makeFakeContainers(t, m, test.containers) fakeContainers := makeFakeContainers(t, m, test.containers)
@ -403,13 +406,13 @@ func TestContainerGC(t *testing.T) {
if test.policy == nil { if test.policy == nil {
test.policy = &defaultGCPolicy test.policy = &defaultGCPolicy
} }
err := m.containerGC.evictContainers(*test.policy, test.allSourcesReady, test.evictTerminatingPods) err := m.containerGC.evictContainers(ctx, *test.policy, test.allSourcesReady, test.evictTerminatingPods)
assert.NoError(t, err) assert.NoError(t, err)
realRemain, err := fakeRuntime.ListContainers(nil) realRemain, err := fakeRuntime.ListContainers(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain)) assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain { for _, remain := range test.remain {
resp, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id, false) resp, err := fakeRuntime.ContainerStatus(ctx, fakeContainers[remain].Id, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, resp.Status) assert.Equal(t, &fakeContainers[remain].ContainerStatus, resp.Status)
} }
@ -419,6 +422,7 @@ func TestContainerGC(t *testing.T) {
// Notice that legacy container symlink is not tested since it may be deprecated soon. // Notice that legacy container symlink is not tested since it may be deprecated soon.
func TestPodLogDirectoryGC(t *testing.T) { func TestPodLogDirectoryGC(t *testing.T) {
ctx := context.Background()
_, _, m, err := createTestRuntimeManager() _, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
fakeOS := m.osInterface.(*containertest.FakeOS) fakeOS := m.osInterface.(*containertest.FakeOS)
@ -449,18 +453,19 @@ func TestPodLogDirectoryGC(t *testing.T) {
} }
// allSourcesReady == true, pod log directories without corresponding pod should be removed. // allSourcesReady == true, pod log directories without corresponding pod should be removed.
err = m.containerGC.evictPodLogsDirectories(true) err = m.containerGC.evictPodLogsDirectories(ctx, true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, removed, fakeOS.Removes) assert.Equal(t, removed, fakeOS.Removes)
// allSourcesReady == false, pod log directories should not be removed. // allSourcesReady == false, pod log directories should not be removed.
fakeOS.Removes = []string{} fakeOS.Removes = []string{}
err = m.containerGC.evictPodLogsDirectories(false) err = m.containerGC.evictPodLogsDirectories(ctx, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Empty(t, fakeOS.Removes) assert.Empty(t, fakeOS.Removes)
} }
func TestUnknownStateContainerGC(t *testing.T) { func TestUnknownStateContainerGC(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -472,13 +477,13 @@ func TestUnknownStateContainerGC(t *testing.T) {
}) })
fakeRuntime.SetFakeContainers(fakeContainers) fakeRuntime.SetFakeContainers(fakeContainers)
err = m.containerGC.evictContainers(defaultGCPolicy, true, false) err = m.containerGC.evictContainers(ctx, defaultGCPolicy, true, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Contains(t, fakeRuntime.GetCalls(), "StopContainer", "RemoveContainer", assert.Contains(t, fakeRuntime.GetCalls(), "StopContainer", "RemoveContainer",
"container in unknown state should be stopped before being removed") "container in unknown state should be stopped before being removed")
remain, err := fakeRuntime.ListContainers(nil) remain, err := fakeRuntime.ListContainers(ctx, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Empty(t, remain) assert.Empty(t, remain)
} }
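TestUnknownStateContainerGC above asserts on fakeRuntime.GetCalls() to check that StopContainer runs before RemoveContainer. A minimal sketch of such a call-recording fake, with illustrative names rather than the real test fake:

package example

import (
	"context"
	"sync"
)

type recordingRuntime struct {
	mu    sync.Mutex
	calls []string
}

func (r *recordingRuntime) record(name string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.calls = append(r.calls, name)
}

func (r *recordingRuntime) StopContainer(ctx context.Context, id string) error {
	r.record("StopContainer")
	return nil
}

func (r *recordingRuntime) RemoveContainer(ctx context.Context, id string) error {
	r.record("RemoveContainer")
	return nil
}

// GetCalls returns a copy of the recorded call sequence for assertions.
func (r *recordingRuntime) GetCalls() []string {
	r.mu.Lock()
	defer r.mu.Unlock()
	return append([]string(nil), r.calls...)
}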
@ -17,6 +17,8 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
@ -28,7 +30,7 @@ import (
// PullImage pulls an image from the network to local storage using the supplied // PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary. // secrets if necessary.
func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
img := image.Image img := image.Image
repoToPull, _, _, err := parsers.ParseImageName(img) repoToPull, _, _, err := parsers.ParseImageName(img)
if err != nil { if err != nil {
@ -46,7 +48,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
if !withCredentials { if !withCredentials {
klog.V(3).InfoS("Pulling image without credentials", "image", img) klog.V(3).InfoS("Pulling image without credentials", "image", img)
imageRef, err := m.imageService.PullImage(imgSpec, nil, podSandboxConfig) imageRef, err := m.imageService.PullImage(ctx, imgSpec, nil, podSandboxConfig)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to pull image", "image", img) klog.ErrorS(err, "Failed to pull image", "image", img)
return "", err return "", err
@ -66,7 +68,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
RegistryToken: currentCreds.RegistryToken, RegistryToken: currentCreds.RegistryToken,
} }
imageRef, err := m.imageService.PullImage(imgSpec, auth, podSandboxConfig) imageRef, err := m.imageService.PullImage(ctx, imgSpec, auth, podSandboxConfig)
// If there was no error, return success // If there was no error, return success
if err == nil { if err == nil {
return imageRef, nil return imageRef, nil
@ -80,8 +82,8 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
// GetImageRef gets the ID of the image that is already present in // GetImageRef gets the ID of the image that is already present in
// local storage. It returns ("", nil) if the image isn't in local storage. // local storage. It returns ("", nil) if the image isn't in local storage.
func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) { func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubecontainer.ImageSpec) (string, error) {
resp, err := m.imageService.ImageStatus(toRuntimeAPIImageSpec(image), false) resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get image status", "image", image.Image) klog.ErrorS(err, "Failed to get image status", "image", image.Image)
return "", err return "", err
@ -93,10 +95,10 @@ func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (
} }
// ListImages gets all images currently on the machine. // ListImages gets all images currently on the machine.
func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) { func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) {
var images []kubecontainer.Image var images []kubecontainer.Image
allImages, err := m.imageService.ListImages(nil) allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to list images") klog.ErrorS(err, "Failed to list images")
return nil, err return nil, err
@ -116,8 +118,8 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
} }
// RemoveImage removes the specified image. // RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image}) err := m.imageService.RemoveImage(ctx, &runtimeapi.ImageSpec{Image: image.Image})
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to remove image", "image", image.Image) klog.ErrorS(err, "Failed to remove image", "image", image.Image)
return err return err
@ -130,8 +132,8 @@ func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) e
// Notice that current logic doesn't really work for images which share layers (e.g. docker images); // Notice that current logic doesn't really work for images which share layers (e.g. docker images);
// this is a known issue, and we'll address this by getting imagefs stats directly from CRI. // this is a known issue, and we'll address this by getting imagefs stats directly from CRI.
// TODO: Get imagefs stats directly from CRI. // TODO: Get imagefs stats directly from CRI.
func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) { func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontainer.ImageStats, error) {
allImages, err := m.imageService.ListImages(nil) allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to list images") klog.ErrorS(err, "Failed to list images")
return nil, err return nil, err
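With PullImage and the other image routines now accepting a context, callers can bound slow registry operations. A hedged sketch assuming a simplified image-service interface rather than the real CRI one:

package example

import (
	"context"
	"fmt"
	"time"
)

type ImageService interface {
	PullImage(ctx context.Context, image string) (string, error)
}

// pullWithDeadline bounds a pull with a per-call timeout so a stalled
// registry cannot block the caller indefinitely.
func pullWithDeadline(svc ImageService, image string, timeout time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ref, err := svc.PullImage(ctx, image)
	if err != nil {
		return "", fmt.Errorf("pull %q: %w", image, err)
	}
	return ref, nil
}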
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"testing" "testing"
@ -32,34 +33,37 @@ import (
) )
func TestPullImage(t *testing.T) { func TestPullImage(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager() _, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "busybox", imageRef) assert.Equal(t, "busybox", imageRef)
images, err := fakeManager.ListImages() images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(images)) assert.Equal(t, 1, len(images))
assert.Equal(t, images[0].RepoTags, []string{"busybox"}) assert.Equal(t, images[0].RepoTags, []string{"busybox"})
} }
func TestPullImageWithError(t *testing.T) { func TestPullImageWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
fakeImageService.InjectError("PullImage", fmt.Errorf("test-error")) fakeImageService.InjectError("PullImage", fmt.Errorf("test-error"))
imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, "", imageRef) assert.Equal(t, "", imageRef)
images, err := fakeManager.ListImages() images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 0, len(images)) assert.Equal(t, 0, len(images))
} }
func TestListImages(t *testing.T) { func TestListImages(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -67,7 +71,7 @@ func TestListImages(t *testing.T) {
expected := sets.NewString(images...) expected := sets.NewString(images...)
fakeImageService.SetFakeImages(images) fakeImageService.SetFakeImages(images)
actualImages, err := fakeManager.ListImages() actualImages, err := fakeManager.ListImages(ctx)
assert.NoError(t, err) assert.NoError(t, err)
actual := sets.NewString() actual := sets.NewString()
for _, i := range actualImages { for _, i := range actualImages {
@ -78,34 +82,37 @@ func TestListImages(t *testing.T) {
} }
func TestListImagesWithError(t *testing.T) { func TestListImagesWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure")) fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))
actualImages, err := fakeManager.ListImages() actualImages, err := fakeManager.ListImages(ctx)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, actualImages) assert.Nil(t, actualImages)
} }
func TestGetImageRef(t *testing.T) { func TestGetImageRef(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
image := "busybox" image := "busybox"
fakeImageService.SetFakeImages([]string{image}) fakeImageService.SetFakeImages([]string{image})
imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, image, imageRef) assert.Equal(t, image, imageRef)
} }
func TestGetImageRefImageNotAvailableLocally(t *testing.T) { func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager() _, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
image := "busybox" image := "busybox"
imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.NoError(t, err) assert.NoError(t, err)
imageNotAvailableLocallyRef := "" imageNotAvailableLocallyRef := ""
@ -113,6 +120,7 @@ func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
} }
func TestGetImageRefWithError(t *testing.T) { func TestGetImageRefWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -120,48 +128,52 @@ func TestGetImageRefWithError(t *testing.T) {
fakeImageService.InjectError("ImageStatus", fmt.Errorf("test-error")) fakeImageService.InjectError("ImageStatus", fmt.Errorf("test-error"))
imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, "", imageRef) assert.Equal(t, "", imageRef)
} }
func TestRemoveImage(t *testing.T) { func TestRemoveImage(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images)) assert.Equal(t, 1, len(fakeImageService.Images))
err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 0, len(fakeImageService.Images)) assert.Equal(t, 0, len(fakeImageService.Images))
} }
func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) { func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager() _, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err) assert.NoError(t, err)
} }
func TestRemoveImageWithError(t *testing.T) { func TestRemoveImageWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images)) assert.Equal(t, 1, len(fakeImageService.Images))
fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure")) fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure"))
err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 1, len(fakeImageService.Images)) assert.Equal(t, 1, len(fakeImageService.Images))
} }
func TestImageStats(t *testing.T) { func TestImageStats(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -170,24 +182,26 @@ func TestImageStats(t *testing.T) {
images := []string{"1111", "2222", "3333"} images := []string{"1111", "2222", "3333"}
fakeImageService.SetFakeImages(images) fakeImageService.SetFakeImages(images)
actualStats, err := fakeManager.ImageStats() actualStats, err := fakeManager.ImageStats(ctx)
assert.NoError(t, err) assert.NoError(t, err)
expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))} expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))}
assert.Equal(t, expectedStats, actualStats) assert.Equal(t, expectedStats, actualStats)
} }
func TestImageStatsWithError(t *testing.T) { func TestImageStatsWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager() _, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure")) fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))
actualImageStats, err := fakeManager.ImageStats() actualImageStats, err := fakeManager.ImageStats(ctx)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, actualImageStats) assert.Nil(t, actualImageStats)
} }
func TestPullWithSecrets(t *testing.T) { func TestPullWithSecrets(t *testing.T) {
ctx := context.Background()
// auth value is equivalent to: "username":"passed-user","password":"passed-password" // auth value is equivalent to: "username":"passed-user","password":"passed-password"
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}} dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
dockercfgContent, err := json.Marshal(dockerCfg) dockercfgContent, err := json.Marshal(dockerCfg)
@ -252,13 +266,14 @@ func TestPullWithSecrets(t *testing.T) {
_, fakeImageService, fakeManager, err := customTestRuntimeManager(builtInKeyRing) _, fakeImageService, fakeManager, err := customTestRuntimeManager(builtInKeyRing)
require.NoError(t, err) require.NoError(t, err)
_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil) _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil)
require.NoError(t, err) require.NoError(t, err)
fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description) fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description)
} }
} }
func TestPullThenListWithAnnotations(t *testing.T) { func TestPullThenListWithAnnotations(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager() _, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -269,10 +284,10 @@ func TestPullThenListWithAnnotations(t *testing.T) {
}, },
} }
_, err = fakeManager.PullImage(imageSpec, nil, nil) _, err = fakeManager.PullImage(ctx, imageSpec, nil, nil)
assert.NoError(t, err) assert.NoError(t, err)
images, err := fakeManager.ListImages() images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(images)) assert.Equal(t, 1, len(images))
assert.Equal(t, images[0].Spec, imageSpec) assert.Equal(t, images[0].Spec, imageSpec)
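The tests above rely on fakeImageService.InjectError to force a failure on a named method. A minimal sketch of that error-injection pattern, as a simplified stand-in for the real fake:

package example

import "context"

type fakeImageService struct {
	errors map[string]error
	images []string
}

// InjectError records an error to be returned by the named method.
func (f *fakeImageService) InjectError(method string, err error) {
	if f.errors == nil {
		f.errors = map[string]error{}
	}
	f.errors[method] = err
}

func (f *fakeImageService) ListImages(ctx context.Context) ([]string, error) {
	if err := f.errors["ListImages"]; err != nil {
		return nil, err
	}
	return f.images, nil
}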
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"os" "os"
@ -195,6 +196,7 @@ func NewKubeGenericRuntimeManager(
getNodeAllocatable func() v1.ResourceList, getNodeAllocatable func() v1.ResourceList,
memoryThrottlingFactor float64, memoryThrottlingFactor float64,
) (KubeGenericRuntime, error) { ) (KubeGenericRuntime, error) {
ctx := context.Background()
runtimeService = newInstrumentedRuntimeService(runtimeService) runtimeService = newInstrumentedRuntimeService(runtimeService)
imageService = newInstrumentedImageManagerService(imageService) imageService = newInstrumentedImageManagerService(imageService)
kubeRuntimeManager := &kubeGenericRuntimeManager{ kubeRuntimeManager := &kubeGenericRuntimeManager{
@ -220,7 +222,7 @@ func NewKubeGenericRuntimeManager(
memoryThrottlingFactor: memoryThrottlingFactor, memoryThrottlingFactor: memoryThrottlingFactor,
} }
typedVersion, err := kubeRuntimeManager.getTypedVersion() typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx)
if err != nil { if err != nil {
klog.ErrorS(err, "Get runtime version failed") klog.ErrorS(err, "Get runtime version failed")
return nil, err return nil, err
@ -271,7 +273,7 @@ func NewKubeGenericRuntimeManager(
kubeRuntimeManager.versionCache = cache.NewObjectCache( kubeRuntimeManager.versionCache = cache.NewObjectCache(
func() (interface{}, error) { func() (interface{}, error) {
return kubeRuntimeManager.getTypedVersion() return kubeRuntimeManager.getTypedVersion(ctx)
}, },
versionCacheTTL, versionCacheTTL,
) )
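Note the pattern in the hunk above: the constructor creates a context.Background() and the version-cache refresh closure captures it, because cache refreshes have no caller-supplied context. A simplified sketch of the same capture; the types are illustrative stand-ins:

package example

import (
	"context"
	"time"
)

type versionCache struct {
	refresh func() (string, error)
	ttl     time.Duration
}

func newManager(getVersion func(ctx context.Context) (string, error)) *versionCache {
	ctx := context.Background() // construction-scoped root context
	return &versionCache{
		refresh: func() (string, error) {
			// Cache refreshes have no caller context, so the closure reuses ctx.
			return getVersion(ctx)
		},
		ttl: time.Minute,
	}
}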
@ -291,8 +293,8 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) {
return utilversion.ParseGeneric(version) return utilversion.ParseGeneric(version)
} }
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) typedVersion, err := m.runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil { if err != nil {
return nil, fmt.Errorf("get remote runtime typed version failed: %v", err) return nil, fmt.Errorf("get remote runtime typed version failed: %v", err)
} }
@ -300,8 +302,8 @@ func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionRespon
} }
// Version returns the version information of the container runtime. // Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) { func (m *kubeGenericRuntimeManager) Version(ctx context.Context) (kubecontainer.Version, error) {
typedVersion, err := m.getTypedVersion() typedVersion, err := m.getTypedVersion(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -324,8 +326,8 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
// Status returns the status of the runtime. An error is returned if the Status // Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise. // function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, error) { func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.RuntimeStatus, error) {
resp, err := m.runtimeService.Status(false) resp, err := m.runtimeService.Status(ctx, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -338,9 +340,9 @@ func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, erro
// GetPods returns a list of containers grouped by pods. The boolean parameter // GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers, including those that // specifies whether the runtime returns all containers, including those that
// have already exited or died (used for garbage collection). // have already exited or died (used for garbage collection).
func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) { func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
pods := make(map[kubetypes.UID]*kubecontainer.Pod) pods := make(map[kubetypes.UID]*kubecontainer.Pod)
sandboxes, err := m.getKubeletSandboxes(all) sandboxes, err := m.getKubeletSandboxes(ctx, all)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -368,7 +370,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
p.CreatedAt = uint64(s.GetCreatedAt()) p.CreatedAt = uint64(s.GetCreatedAt())
} }
containers, err := m.getKubeletContainers(all) containers, err := m.getKubeletContainers(ctx, all)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -669,7 +671,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
// 5. Create ephemeral containers. // 5. Create ephemeral containers.
// 6. Create init containers. // 6. Create init containers.
// 7. Create normal containers. // 7. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes. // Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodActions(pod, podStatus) podContainerChanges := m.computePodActions(pod, podStatus)
klog.V(3).InfoS("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod)) klog.V(3).InfoS("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
@ -693,7 +695,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
klog.V(4).InfoS("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod)) klog.V(4).InfoS("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
} }
killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil) killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
result.AddPodSyncResult(killResult) result.AddPodSyncResult(killResult)
if killResult.Error() != nil { if killResult.Error() != nil {
klog.ErrorS(killResult.Error(), "killPodWithSyncResult failed") klog.ErrorS(killResult.Error(), "killPodWithSyncResult failed")
@ -701,7 +703,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
} }
if podContainerChanges.CreateSandbox { if podContainerChanges.CreateSandbox {
m.purgeInitContainers(pod, podStatus) m.purgeInitContainers(ctx, pod, podStatus)
} }
} else { } else {
// Step 3: kill any running containers in this pod which are not to keep. // Step 3: kill any running containers in this pod which are not to keep.
@ -709,7 +711,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
klog.V(3).InfoS("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod)) klog.V(3).InfoS("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
result.AddSyncResult(killContainerResult) result.AddSyncResult(killContainerResult)
if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil { if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
klog.ErrorS(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod)) klog.ErrorS(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
return return
@ -720,7 +722,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// Keep terminated init containers fairly aggressively controlled // Keep terminated init containers fairly aggressively controlled
// This is an optimization because container removals are typically handled // This is an optimization because container removals are typically handled
// by container garbage collector. // by container garbage collector.
m.pruneInitContainersBeforeStart(pod, podStatus) m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
// We pass the value of the PRIMARY podIP and list of podIPs down to // We pass the value of the PRIMARY podIP and list of podIPs down to
// generatePodSandboxConfig and generateContainerConfig, which in turn // generatePodSandboxConfig and generateContainerConfig, which in turn
@ -758,7 +760,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// When runc supports slash as sysctl separator, this function can no longer be used. // When runc supports slash as sysctl separator, this function can no longer be used.
sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext) sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext)
podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt) podSandboxID, msg, err = m.createPodSandbox(ctx, pod, podContainerChanges.Attempt)
if err != nil { if err != nil {
// createPodSandbox can return an error from CNI, CSI, // createPodSandbox can return an error from CNI, CSI,
// or CRI if the Pod has been deleted while the POD is // or CRI if the Pod has been deleted while the POD is
@ -783,7 +785,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
} }
klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod)) klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false) resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
if err != nil { if err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod) ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil { if referr != nil {
@ -832,7 +834,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// currently: "container", "init container" or "ephemeral container" // currently: "container", "init container" or "ephemeral container"
// metricLabel is the label used to describe this type of container in monitoring metrics. // metricLabel is the label used to describe this type of container in monitoring metrics.
// currently: "container", "init_container" or "ephemeral_container" // currently: "container", "init_container" or "ephemeral_container"
start := func(typeName, metricLabel string, spec *startSpec) error { start := func(ctx context.Context, typeName, metricLabel string, spec *startSpec) error {
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name) startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
result.AddSyncResult(startContainerResult) result.AddSyncResult(startContainerResult)
@ -849,7 +851,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
} }
klog.V(4).InfoS("Creating container in pod", "containerType", typeName, "container", spec.container, "pod", klog.KObj(pod)) klog.V(4).InfoS("Creating container in pod", "containerType", typeName, "container", spec.container, "pod", klog.KObj(pod))
// NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs. // NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs.
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil { if msg, err := m.startContainer(ctx, podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
// startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are // startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are
// useful to cluster administrators to distinguish "server errors" from "user errors". // useful to cluster administrators to distinguish "server errors" from "user errors".
metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc() metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
@ -876,13 +878,13 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// are errors starting an init container. In practice init containers will start first since ephemeral // are errors starting an init container. In practice init containers will start first since ephemeral
// containers cannot be specified on pod creation. // containers cannot be specified on pod creation.
for _, idx := range podContainerChanges.EphemeralContainersToStart { for _, idx := range podContainerChanges.EphemeralContainersToStart {
start("ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx])) start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
} }
// Step 6: start the init container. // Step 6: start the init container.
if container := podContainerChanges.NextInitContainerToStart; container != nil { if container := podContainerChanges.NextInitContainerToStart; container != nil {
// Start the next init container. // Start the next init container.
if err := start("init container", metrics.InitContainer, containerStartSpec(container)); err != nil { if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
return return
} }
@ -892,7 +894,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// Step 7: start containers in podContainerChanges.ContainersToStart. // Step 7: start containers in podContainerChanges.ContainersToStart.
for _, idx := range podContainerChanges.ContainersToStart { for _, idx := range podContainerChanges.ContainersToStart {
start("container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx])) start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
} }
return return
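In the SyncPod hunks above, the start helper takes ctx as an explicit first parameter rather than capturing it, so every ephemeral, init, and regular container start shares the sync call's cancellation scope. A minimal sketch of the same shape, with illustrative names:

package example

import "context"

type startSpec struct{ name string }

// syncContainers mirrors the closure shape above: the helper receives ctx
// explicitly instead of capturing it, keeping cancellation flow visible.
func syncContainers(ctx context.Context, specs []startSpec, startOne func(context.Context, startSpec) error) error {
	start := func(ctx context.Context, spec startSpec) error {
		// Per-container bookkeeping (sync results, metrics) would go here.
		return startOne(ctx, spec)
	}
	for _, spec := range specs {
		if err := start(ctx, spec); err != nil {
			return err
		}
	}
	return nil
}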
@ -935,15 +937,15 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
// gracePeriodOverride if specified allows the caller to override the pod default grace period. // gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride) err := m.killPodWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
return err.Error() return err.Error()
} }
// killPodWithSyncResult kills a runningPod and returns SyncResult. // killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted. // Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride) killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
for _, containerResult := range killContainerResults { for _, containerResult := range killContainerResults {
result.AddSyncResult(containerResult) result.AddSyncResult(containerResult)
} }
@ -953,7 +955,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
result.AddSyncResult(killSandboxResult) result.AddSyncResult(killSandboxResult)
// Stop all sandboxes belonging to the same pod // Stop all sandboxes belonging to the same pod
for _, podSandbox := range runningPod.Sandboxes { for _, podSandbox := range runningPod.Sandboxes {
if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) { if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error()) killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID) klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
} }
@ -964,7 +966,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
// GetPodStatus retrieves the status of the pod, including the // GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime. // information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) { func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
// Now we retain the restart count of a container as a container label. Each time a container // Now we retain the restart count of a container as a container label. Each time a container
// restarts, the pod will read the restart count from the registered dead container, increment // restarts, the pod will read the restart count from the registered dead container, increment
// it to get the new restart count, and then add a label with the new restart count on // it to get the new restart count, and then add a label with the new restart count on
@ -978,7 +980,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
// Anyhow, we only promised "best-effort" restart count reporting, so we can just ignore // Anyhow, we only promised "best-effort" restart count reporting, so we can just ignore
// these limitations now. // these limitations now.
// TODO: move this comment to SyncPod. // TODO: move this comment to SyncPod.
podSandboxIDs, err := m.getSandboxIDByPodUID(uid, nil) podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -998,7 +1000,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
sandboxStatuses := []*runtimeapi.PodSandboxStatus{} sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
podIPs := []string{} podIPs := []string{}
for idx, podSandboxID := range podSandboxIDs { for idx, podSandboxID := range podSandboxIDs {
resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false) resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
// Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal. // Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal.
// The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing. // The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing.
// Therefore, this method should not either, but instead act as if the previous call failed, // Therefore, this method should not either, but instead act as if the previous call failed,
@ -1022,7 +1024,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
} }
// Get statuses of all containers visible in the pod. // Get statuses of all containers visible in the pod.
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace) containerStatuses, err := m.getPodContainerStatuses(ctx, uid, name, namespace)
if err != nil { if err != nil {
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) { if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod)) klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
@ -1042,17 +1044,17 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
} }
// GarbageCollect removes dead containers using the specified container gc policy. // GarbageCollect removes dead containers using the specified container gc policy.
func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods) return m.containerGC.GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
} }
// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim // UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
// with the podCIDR supplied by the kubelet. // with the podCIDR supplied by the kubelet.
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error { func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
// TODO(#35531): do we really want to write a method on this manager for each // TODO(#35531): do we really want to write a method on this manager for each
// field of the config? // field of the config?
klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR) klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR)
return m.runtimeService.UpdateRuntimeConfig( return m.runtimeService.UpdateRuntimeConfig(ctx,
&runtimeapi.RuntimeConfig{ &runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeapi.NetworkConfig{ NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: podCIDR, PodCidr: podCIDR,
@ -1060,6 +1062,6 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
}) })
} }
func (m *kubeGenericRuntimeManager) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { func (m *kubeGenericRuntimeManager) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
return m.runtimeService.CheckpointContainer(options) return m.runtimeService.CheckpointContainer(ctx, options)
} }

View File

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"fmt" "fmt"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -161,10 +162,11 @@ func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates
// makeFakeContainer creates a fake container based on a container template. // makeFakeContainer creates a fake container based on a container template.
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer { func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
ctx := context.Background()
sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt) sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template) assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil) containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil)
assert.NoError(t, err, "generateContainerConfig for container template %+v", template) assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata) podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
@ -281,10 +283,11 @@ func TestNewKubeRuntimeManager(t *testing.T) {
} }
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {
ctx := context.Background()
_, _, m, err := createTestRuntimeManager() _, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
version, err := m.Version() version, err := m.Version(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, version.String()) assert.Equal(t, kubeRuntimeAPIVersion, version.String())
} }
@ -298,6 +301,7 @@ func TestContainerRuntimeType(t *testing.T) {
} }
func TestGetPodStatus(t *testing.T) { func TestGetPodStatus(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -327,7 +331,7 @@ func TestGetPodStatus(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime. // Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod) makeAndSetFakePod(t, m, fakeRuntime, pod)
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, pod.UID, podStatus.ID) assert.Equal(t, pod.UID, podStatus.ID)
assert.Equal(t, pod.Name, podStatus.Name) assert.Equal(t, pod.Name, podStatus.Name)
@ -336,6 +340,7 @@ func TestGetPodStatus(t *testing.T) {
} }
func TestStopContainerWithNotFoundError(t *testing.T) { func TestStopContainerWithNotFoundError(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -365,15 +370,16 @@ func TestStopContainerWithNotFoundError(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime. // Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod) makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container")) fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err) require.NoError(t, err)
p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus) p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus)
gracePeriod := int64(1) gracePeriod := int64(1)
err = m.KillPod(pod, p, &gracePeriod) err = m.KillPod(ctx, pod, p, &gracePeriod)
require.NoError(t, err) require.NoError(t, err)
} }
func TestGetPodStatusWithNotFoundError(t *testing.T) { func TestGetPodStatusWithNotFoundError(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -403,7 +409,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime. // Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod) makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container")) fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, pod.UID, podStatus.ID) require.Equal(t, pod.UID, podStatus.ID)
require.Equal(t, pod.Name, podStatus.Name) require.Equal(t, pod.Name, podStatus.Name)
@ -412,6 +418,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
} }
func TestGetPods(t *testing.T) { func TestGetPods(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -480,7 +487,7 @@ func TestGetPods(t *testing.T) {
}, },
} }
actual, err := m.GetPods(false) actual, err := m.GetPods(ctx, false)
assert.NoError(t, err) assert.NoError(t, err)
if !verifyPods(expected, actual) { if !verifyPods(expected, actual) {
@ -489,6 +496,7 @@ func TestGetPods(t *testing.T) {
} }
func TestGetPodsSorted(t *testing.T) { func TestGetPodsSorted(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -506,7 +514,7 @@ func TestGetPodsSorted(t *testing.T) {
} }
fakeRuntime.SetFakeSandboxes(fakeSandboxes) fakeRuntime.SetFakeSandboxes(fakeSandboxes)
actual, err := m.GetPods(false) actual, err := m.GetPods(ctx, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, actual, 3) assert.Len(t, actual, 3)
@ -518,6 +526,7 @@ func TestGetPodsSorted(t *testing.T) {
} }
func TestKillPod(t *testing.T) { func TestKillPod(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -584,7 +593,7 @@ func TestKillPod(t *testing.T) {
}, },
} }
err = m.KillPod(pod, runningPod, nil) err = m.KillPod(ctx, pod, runningPod, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 3, len(fakeRuntime.Containers)) assert.Equal(t, 3, len(fakeRuntime.Containers))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
@ -624,7 +633,7 @@ func TestSyncPod(t *testing.T) {
} }
backOff := flowcontrol.NewBackOff(time.Second, time.Minute) backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
assert.Equal(t, 2, len(fakeRuntime.Containers)) assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 2, len(fakeImage.Images)) assert.Equal(t, 2, len(fakeImage.Images))
@ -684,7 +693,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
} }
backOff := flowcontrol.NewBackOff(time.Second, time.Minute) backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls) assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls)
for _, sandbox := range fakeRuntime.Sandboxes { for _, sandbox := range fakeRuntime.Sandboxes {
@ -696,6 +705,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
} }
func TestPruneInitContainers(t *testing.T) { func TestPruneInitContainers(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -722,10 +732,10 @@ func TestPruneInitContainers(t *testing.T) {
} }
fakes := makeFakeContainers(t, m, templates) fakes := makeFakeContainers(t, m, templates)
fakeRuntime.SetFakeContainers(fakes) fakeRuntime.SetFakeContainers(fakes)
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
m.pruneInitContainersBeforeStart(pod, podStatus) m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id) expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok { if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %v, got %v", expectedContainers, actual) t.Errorf("expected %v, got %v", expectedContainers, actual)
@ -733,6 +743,7 @@ func TestPruneInitContainers(t *testing.T) {
} }
func TestSyncPodWithInitContainers(t *testing.T) { func TestSyncPodWithInitContainers(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
@ -770,9 +781,9 @@ func TestSyncPodWithInitContainers(t *testing.T) {
backOff := flowcontrol.NewBackOff(time.Second, time.Minute) backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
// 1. should only create the init container. // 1. should only create the init container.
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
expected := []*cRecord{ expected := []*cRecord{
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
@ -780,24 +791,24 @@ func TestSyncPodWithInitContainers(t *testing.T) {
verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container") verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")
// 2. should not create app container because init container is still running. // 2. should not create app container because init container is still running.
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing") verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")
// 3. should create all app containers because init container finished. // 3. should create all app containers because init container finished.
// Stop init container instance 0. // Stop init container instance 0.
sandboxIDs, err := m.getSandboxIDByPodUID(pod.UID, nil) sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil)
require.NoError(t, err) require.NoError(t, err)
sandboxID := sandboxIDs[0] sandboxID := sandboxIDs[0]
initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0) initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
require.NoError(t, err) require.NoError(t, err)
fakeRuntime.StopContainer(initID0, 0) fakeRuntime.StopContainer(ctx, initID0, 0)
// Sync again. // Sync again.
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
expected = []*cRecord{ expected = []*cRecord{
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
@ -808,11 +819,11 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// 4. should restart the init container if needed to create a new podsandbox // 4. should restart the init container if needed to create a new podsandbox
// Stop the pod sandbox. // Stop the pod sandbox.
fakeRuntime.StopPodSandbox(sandboxID) fakeRuntime.StopPodSandbox(ctx, sandboxID)
// Sync again. // Sync again.
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
expected = []*cRecord{ expected = []*cRecord{
// The first init container instance is purged and no longer visible. // The first init container instance is purged and no longer visible.
@ -1541,6 +1552,7 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
} }
func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) { func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err) assert.NoError(t, err)
fakeRuntime.ErrorOnSandboxCreate = true fakeRuntime.ErrorOnSandboxCreate = true
@ -1569,9 +1581,9 @@ func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
// GetPodStatus and the following SyncPod will not return errors in the // GetPodStatus and the following SyncPod will not return errors in the
// case where the pod has been deleted. We are not adding any pods into // case where the pod has been deleted. We are not adding any pods into
// the fakePodProvider so they are 'deleted'. // the fakePodProvider so they are 'deleted'.
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err) assert.NoError(t, err)
result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
// This will return an error if the pod has _not_ been deleted. // This will return an error if the pod has _not_ been deleted.
assert.NoError(t, result.Error()) assert.NoError(t, result.Error())
} }
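The test updates are uniform: each test creates one root context up front and passes it to every context-accepting call. A minimal sketch of the pattern; doSync is a hypothetical stand-in for methods like GetPodStatus or SyncPod:

```go
package kuberuntime // illustrative; this would live in a _test.go file

import (
	"context"
	"testing"
)

// doSync is a hypothetical context-accepting function standing in for the
// manager methods exercised by the tests above.
func doSync(ctx context.Context) error { return ctx.Err() }

func TestContextPlumbing(t *testing.T) {
	// One context.Background() per test, reused for every call, matching
	// the updated tests above.
	ctx := context.Background()
	if err := doSync(ctx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```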

View File

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"fmt" "fmt"
"net/url" "net/url"
"runtime" "runtime"
@ -37,7 +38,7 @@ import (
) )
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error). // createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) { func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt) podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil { if err != nil {
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err) message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
@ -65,7 +66,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
} }
} }
podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler) podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil { if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err) message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod)) klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
@ -281,7 +282,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod)
} }
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet. // getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) { func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeapi.PodSandboxFilter var filter *runtimeapi.PodSandboxFilter
if !all { if !all {
readyState := runtimeapi.PodSandboxState_SANDBOX_READY readyState := runtimeapi.PodSandboxState_SANDBOX_READY
@ -292,7 +293,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
} }
} }
resp, err := m.runtimeService.ListPodSandbox(filter) resp, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to list pod sandboxes") klog.ErrorS(err, "Failed to list pod sandboxes")
return nil, err return nil, err
@ -335,7 +336,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
// getSandboxIDByPodUID gets the sandbox IDs by podUID and returns ([]sandboxID, error). // getSandboxIDByPodUID gets the sandbox IDs by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to the same pod. // Param state could be nil in order to get all sandboxes belonging to the same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) { func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeapi.PodSandboxFilter{ filter := &runtimeapi.PodSandboxFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)}, LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
} }
@ -344,7 +345,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
State: *state, State: *state,
} }
} }
sandboxes, err := m.runtimeService.ListPodSandbox(filter) sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID) klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
return nil, err return nil, err
@ -365,8 +366,8 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
} }
// GetPortForward gets the endpoint the runtime will serve the port-forward request from. // GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) { func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(podUID, nil) sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err) return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
} }
@ -377,7 +378,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
PodSandboxId: sandboxIDs[0], PodSandboxId: sandboxIDs[0],
Port: ports, Port: ports,
} }
resp, err := m.runtimeService.PortForward(req) resp, err := m.runtimeService.PortForward(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
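GetPortForward above is a two-step lookup: list the pod's sandboxes, then ask the runtime for a streaming endpoint, with the same ctx handed to both CRI calls. A sketch under a hypothetical reduced client interface:

```go
package kuberuntime // illustrative package name

import (
	"context"
	"errors"
	"fmt"
)

// sandboxClient is a hypothetical two-method view of the CRI calls used by
// GetPortForward above.
type sandboxClient interface {
	ListSandboxIDs(ctx context.Context, podUID string) ([]string, error)
	PortForwardURL(ctx context.Context, sandboxID string, ports []int32) (string, error)
}

func getPortForward(ctx context.Context, c sandboxClient, podUID string, ports []int32) (string, error) {
	ids, err := c.ListSandboxIDs(ctx, podUID)
	if err != nil {
		return "", fmt.Errorf("failed to find sandboxID for pod %s: %w", podUID, err)
	}
	if len(ids) == 0 {
		return "", errors.New("no sandboxes found for pod")
	}
	// As in the real code above, the first returned sandbox ID is used.
	return c.PortForwardURL(ctx, ids[0], ports)
}
```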

View File

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime package kuberuntime
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@ -38,6 +39,7 @@ import (
// TestCreatePodSandbox tests creating a sandbox and its corresponding pod log directory. // TestCreatePodSandbox tests creating a sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) { func TestCreatePodSandbox(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager() fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err) require.NoError(t, err)
pod := newTestPod() pod := newTestPod()
@ -49,10 +51,10 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Equal(t, os.FileMode(0755), perm) assert.Equal(t, os.FileMode(0755), perm)
return nil return nil
} }
id, _, err := m.createPodSandbox(pod, 1) id, _, err := m.createPodSandbox(ctx, pod, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox") assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id}) sandboxes, err := fakeRuntime.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{Id: id})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1) assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration // TODO Check pod sandbox configuration
@ -103,6 +105,7 @@ func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) {
// TestCreatePodSandbox_RuntimeClass tests creating a sandbox with RuntimeClasses enabled. // TestCreatePodSandbox_RuntimeClass tests creating a sandbox with RuntimeClasses enabled.
func TestCreatePodSandbox_RuntimeClass(t *testing.T) { func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
ctx := context.Background()
rcm := runtimeclass.NewManager(rctest.NewPopulatedClient()) rcm := runtimeclass.NewManager(rctest.NewPopulatedClient())
defer rctest.StartManagerSync(rcm)() defer rctest.StartManagerSync(rcm)()
@ -125,7 +128,7 @@ func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
pod := newTestPod() pod := newTestPod()
pod.Spec.RuntimeClassName = test.rcn pod.Spec.RuntimeClassName = test.rcn
id, _, err := m.createPodSandbox(pod, 1) id, _, err := m.createPodSandbox(ctx, pod, 1)
if test.expectError { if test.expectError {
assert.Error(t, err) assert.Error(t, err)
} else { } else {

View File

@ -414,8 +414,8 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r
} }
} }
func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) { func isContainerRunning(ctx context.Context, id string, r internalapi.RuntimeService) (bool, error) {
resp, err := r.ContainerStatus(id, false) resp, err := r.ContainerStatus(ctx, id, false)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -438,7 +438,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
// the error is any error that occurs while waiting for new logs. // the error is any error that occurs while waiting for new logs.
func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) { func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) {
// no need to wait if the pod is not running // no need to wait if the pod is not running
if running, err := isContainerRunning(id, runtimeService); !running { if running, err := isContainerRunning(ctx, id, runtimeService); !running {
return false, false, err return false, false, err
} }
errRetry := 5 errRetry := 5
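Here the already context-aware log path simply stops dropping its ctx at the CRI boundary: isContainerRunning now forwards it into ContainerStatus. A small sketch of the resulting shape, with a hypothetical one-method status client:

```go
package logs // illustrative package name

import "context"

// statusClient is a hypothetical one-method view of the CRI RuntimeService.
type statusClient interface {
	ContainerRunning(ctx context.Context, id string) (bool, error)
}

// waitLogs shows the shape after the change: the streaming loop's ctx reaches
// the runtime query, so cancelling the log stream also cancels the poll.
func waitLogs(ctx context.Context, id string, c statusClient) (bool, error) {
	running, err := c.ContainerRunning(ctx, id)
	if err != nil || !running {
		// No need to keep waiting for output once the container has stopped.
		return false, err
	}
	return true, nil
}
```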

View File

@ -54,7 +54,7 @@ type handlerRunner struct {
} }
type podStatusProvider interface { type podStatusProvider interface {
GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
} }
// NewHandlerRunner returns a configured lifecycle handler for a container. // NewHandlerRunner returns a configured lifecycle handler for a container.
@ -67,19 +67,19 @@ func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.C
} }
} }
func (hr *handlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) { func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) {
switch { switch {
case handler.Exec != nil: case handler.Exec != nil:
var msg string var msg string
// TODO(tallclair): Pass a proper timeout value. // TODO(tallclair): Pass a proper timeout value.
output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0)
if err != nil { if err != nil {
msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output))
klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output)) klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output))
} }
return msg, err return msg, err
case handler.HTTPGet != nil: case handler.HTTPGet != nil:
err := hr.runHTTPHandler(pod, container, handler, hr.eventRecorder) err := hr.runHTTPHandler(ctx, pod, container, handler, hr.eventRecorder)
var msg string var msg string
if err != nil { if err != nil {
msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err) msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err)
@ -117,11 +117,11 @@ func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int
return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container)
} }
func (hr *handlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error { func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error {
host := handler.HTTPGet.Host host := handler.HTTPGet.Host
podIP := host podIP := host
if len(host) == 0 { if len(host) == 0 {
status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil { if err != nil {
klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod)) klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod))
return err return err
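Lifecycle hooks get the same treatment: Run and runHTTPHandler accept the caller's ctx and pass it to RunInContainer and GetPodStatus. A sketch of the exec branch, with a hypothetical command-runner interface:

```go
package lifecycle // illustrative package name

import (
	"context"
	"fmt"
)

// commandRunner is a hypothetical stand-in for the kubelet's container
// command runner after this change.
type commandRunner interface {
	RunInContainer(ctx context.Context, containerID string, cmd []string) ([]byte, error)
}

// runExecHook mirrors the exec branch of Run above: the hook executes under
// the caller's ctx, so tearing down the pod can cancel a stuck hook.
func runExecHook(ctx context.Context, r commandRunner, containerID string, cmd []string) (string, error) {
	output, err := r.RunInContainer(ctx, containerID, cmd)
	if err != nil {
		msg := fmt.Sprintf("Exec lifecycle hook (%v) failed - error: %v, message: %q", cmd, err, string(output))
		return msg, err
	}
	return "", nil
}
```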

View File

@ -17,6 +17,7 @@ limitations under the License.
package lifecycle package lifecycle
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -94,7 +95,7 @@ type fakeContainerCommandRunner struct {
Msg string Msg string
} }
func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { func (f *fakeContainerCommandRunner) RunInContainer(_ context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
f.Cmd = cmd f.Cmd = cmd
f.ID = id f.ID = id
return []byte(f.Msg), f.Err return []byte(f.Msg), f.Err
@ -113,11 +114,12 @@ func stubPodStatusProvider(podIP string) podStatusProvider {
type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
return f(uid, name, namespace) return f(uid, name, namespace)
} }
func TestRunHandlerExec(t *testing.T) { func TestRunHandlerExec(t *testing.T) {
ctx := context.Background()
fakeCommandRunner := fakeContainerCommandRunner{} fakeCommandRunner := fakeContainerCommandRunner{}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil) handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
@ -139,7 +141,7 @@ func TestRunHandlerExec(t *testing.T) {
pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -163,6 +165,7 @@ func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) {
} }
func TestRunHandlerHttp(t *testing.T) { func TestRunHandlerHttp(t *testing.T) {
ctx := context.Background()
fakeHTTPGetter := fakeHTTP{} fakeHTTPGetter := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@ -187,7 +190,7 @@ func TestRunHandlerHttp(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.ObjectMeta.UID = "foo-bar-quux" pod.ObjectMeta.UID = "foo-bar-quux"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -198,6 +201,7 @@ func TestRunHandlerHttp(t *testing.T) {
} }
func TestRunHandlerHttpWithHeaders(t *testing.T) { func TestRunHandlerHttpWithHeaders(t *testing.T) {
ctx := context.Background()
fakeHTTPDoer := fakeHTTP{} fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
@ -225,7 +229,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -239,6 +243,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
} }
func TestRunHandlerHttps(t *testing.T) { func TestRunHandlerHttps(t *testing.T) {
ctx := context.Background()
fakeHTTPDoer := fakeHTTP{} fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@ -266,7 +271,7 @@ func TestRunHandlerHttps(t *testing.T) {
t.Run("consistent", func(t *testing.T) { t.Run("consistent", func(t *testing.T) {
container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70") container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -280,7 +285,7 @@ func TestRunHandlerHttps(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70") container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -347,13 +352,14 @@ func TestRunHandlerHTTPPort(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) { t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, tt.FeatureGateEnabled)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, tt.FeatureGateEnabled)()
fakeHTTPDoer := fakeHTTP{} fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
container.Lifecycle.PostStart.HTTPGet.Port = tt.Port container.Lifecycle.PostStart.HTTPGet.Port = tt.Port
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if hasError := (err != nil); hasError != tt.ExpectError { if hasError := (err != nil); hasError != tt.ExpectError {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -618,6 +624,7 @@ func TestRunHTTPHandler(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) { t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
fakePodStatusProvider := stubPodStatusProvider(tt.PodIP) fakePodStatusProvider := stubPodStatusProvider(tt.PodIP)
container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet
@ -627,7 +634,7 @@ func TestRunHTTPHandler(t *testing.T) {
fakeHTTPDoer := fakeHTTP{} fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -654,6 +661,7 @@ func TestRunHTTPHandler(t *testing.T) {
} }
func TestRunHandlerNil(t *testing.T) { func TestRunHandlerNil(t *testing.T) {
ctx := context.Background()
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil) handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil)
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
podName := "podFoo" podName := "podFoo"
@ -670,13 +678,14 @@ func TestRunHandlerNil(t *testing.T) {
pod.ObjectMeta.Name = podName pod.ObjectMeta.Name = podName
pod.ObjectMeta.Namespace = podNamespace pod.ObjectMeta.Namespace = podNamespace
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil { if err == nil {
t.Errorf("expect error, but got nil") t.Errorf("expect error, but got nil")
} }
} }
func TestRunHandlerExecFailure(t *testing.T) { func TestRunHandlerExecFailure(t *testing.T) {
ctx := context.Background()
expectedErr := fmt.Errorf("invalid command") expectedErr := fmt.Errorf("invalid command")
fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()} fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil) handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
@ -701,7 +710,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error()) expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error())
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil { if err == nil {
t.Errorf("expected error: %v", expectedErr) t.Errorf("expected error: %v", expectedErr)
} }
@ -711,6 +720,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
} }
func TestRunHandlerHttpFailure(t *testing.T) { func TestRunHandlerHttpFailure(t *testing.T) {
ctx := context.Background()
expectedErr := fmt.Errorf("fake http error") expectedErr := fmt.Errorf("fake http error")
expectedResp := http.Response{ expectedResp := http.Response{
Body: io.NopCloser(strings.NewReader(expectedErr.Error())), Body: io.NopCloser(strings.NewReader(expectedErr.Error())),
@ -740,7 +750,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr) expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr)
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil { if err == nil {
t.Errorf("expected error: %v", expectedErr) t.Errorf("expected error: %v", expectedErr)
} }
@ -753,6 +763,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
} }
func TestRunHandlerHttpsFailureFallback(t *testing.T) { func TestRunHandlerHttpsFailureFallback(t *testing.T) {
ctx := context.Background()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, true)()
// Since prometheus' gatherer is global, other tests may have updated metrics already, so // Since prometheus' gatherer is global, other tests may have updated metrics already, so
@ -803,7 +814,7 @@ func TestRunHandlerHttpsFailureFallback(t *testing.T) {
pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container} pod.Spec.Containers = []v1.Container{container}
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@ -18,6 +18,7 @@ package logs
import ( import (
"compress/gzip" "compress/gzip"
"context"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -58,7 +59,7 @@ type ContainerLogManager interface {
// Start container log manager. // Start container log manager.
Start() Start()
// Clean removes all logs of the specified container. // Clean removes all logs of the specified container.
Clean(containerID string) error Clean(ctx context.Context, containerID string) error
} }
// LogRotatePolicy is a policy for container log rotation. The policy applies to all // LogRotatePolicy is a policy for container log rotation. The policy applies to all
@ -177,19 +178,20 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa
// Start the container log manager. // Start the container log manager.
func (c *containerLogManager) Start() { func (c *containerLogManager) Start() {
ctx := context.Background()
// Start a goroutine that periodically does container log rotation. // Start a goroutine that periodically does container log rotation.
go wait.Forever(func() { go wait.Forever(func() {
if err := c.rotateLogs(); err != nil { if err := c.rotateLogs(ctx); err != nil {
klog.ErrorS(err, "Failed to rotate container logs") klog.ErrorS(err, "Failed to rotate container logs")
} }
}, logMonitorPeriod) }, logMonitorPeriod)
} }
// Clean removes all logs of the specified container (including rotated ones). // Clean removes all logs of the specified container (including rotated ones).
func (c *containerLogManager) Clean(containerID string) error { func (c *containerLogManager) Clean(ctx context.Context, containerID string) error {
c.mutex.Lock() c.mutex.Lock()
defer c.mutex.Unlock() defer c.mutex.Unlock()
resp, err := c.runtimeService.ContainerStatus(containerID, false) resp, err := c.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil { if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err) return fmt.Errorf("failed to get container status %q: %v", containerID, err)
} }
@ -211,11 +213,11 @@ func (c *containerLogManager) Clean(containerID string) error {
return nil return nil
} }
func (c *containerLogManager) rotateLogs() error { func (c *containerLogManager) rotateLogs(ctx context.Context) error {
c.mutex.Lock() c.mutex.Lock()
defer c.mutex.Unlock() defer c.mutex.Unlock()
// TODO(#59998): Use kubelet pod cache. // TODO(#59998): Use kubelet pod cache.
containers, err := c.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) containers, err := c.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{})
if err != nil { if err != nil {
return fmt.Errorf("failed to list containers: %v", err) return fmt.Errorf("failed to list containers: %v", err)
} }
@ -228,7 +230,7 @@ func (c *containerLogManager) rotateLogs() error {
} }
id := container.GetId() id := container.GetId()
// Note that we should not block log rotation on an error from a single container. // Note that we should not block log rotation on an error from a single container.
resp, err := c.runtimeService.ContainerStatus(id, false) resp, err := c.runtimeService.ContainerStatus(ctx, id, false)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get container status", "containerID", id) klog.ErrorS(err, "Failed to get container status", "containerID", id)
continue continue
@ -247,7 +249,7 @@ func (c *containerLogManager) rotateLogs() error {
// In rotateLatestLog, there are several cases in which we may // In rotateLatestLog, there are several cases in which we may
// lose the original container log after ReopenContainerLog fails. // lose the original container log after ReopenContainerLog fails.
// We try to recover it by reopening the container log. // We try to recover it by reopening the container log.
if err := c.runtimeService.ReopenContainerLog(id); err != nil { if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
klog.ErrorS(err, "Container log doesn't exist, reopen container log failed", "containerID", id, "path", path) klog.ErrorS(err, "Container log doesn't exist, reopen container log failed", "containerID", id, "path", path)
continue continue
} }
@ -262,7 +264,7 @@ func (c *containerLogManager) rotateLogs() error {
continue continue
} }
// Perform log rotation. // Perform log rotation.
if err := c.rotateLog(id, path); err != nil { if err := c.rotateLog(ctx, id, path); err != nil {
klog.ErrorS(err, "Failed to rotate log for container", "path", path, "containerID", id) klog.ErrorS(err, "Failed to rotate log for container", "path", path, "containerID", id)
continue continue
} }
@ -270,7 +272,7 @@ func (c *containerLogManager) rotateLogs() error {
return nil return nil
} }
func (c *containerLogManager) rotateLog(id, log string) error { func (c *containerLogManager) rotateLog(ctx context.Context, id, log string) error {
// pattern is used to match all rotated files. // pattern is used to match all rotated files.
pattern := fmt.Sprintf("%s.*", log) pattern := fmt.Sprintf("%s.*", log)
logs, err := filepath.Glob(pattern) logs, err := filepath.Glob(pattern)
@ -298,7 +300,7 @@ func (c *containerLogManager) rotateLog(id, log string) error {
} }
} }
if err := c.rotateLatestLog(id, log); err != nil { if err := c.rotateLatestLog(ctx, id, log); err != nil {
return fmt.Errorf("failed to rotate log %q: %v", log, err) return fmt.Errorf("failed to rotate log %q: %v", log, err)
} }
@ -410,13 +412,13 @@ func (c *containerLogManager) compressLog(log string) error {
// rotateLatestLog rotates the latest log without compression, so that the container can still write // rotateLatestLog rotates the latest log without compression, so that the container can still write
// to it and fluentd can finish reading it. // to it and fluentd can finish reading it.
func (c *containerLogManager) rotateLatestLog(id, log string) error { func (c *containerLogManager) rotateLatestLog(ctx context.Context, id, log string) error {
timestamp := c.clock.Now().Format(timestampFormat) timestamp := c.clock.Now().Format(timestampFormat)
rotated := fmt.Sprintf("%s.%s", log, timestamp) rotated := fmt.Sprintf("%s.%s", log, timestamp)
if err := c.osInterface.Rename(log, rotated); err != nil { if err := c.osInterface.Rename(log, rotated); err != nil {
return fmt.Errorf("failed to rotate log %q to %q: %v", log, rotated, err) return fmt.Errorf("failed to rotate log %q to %q: %v", log, rotated, err)
} }
if err := c.runtimeService.ReopenContainerLog(id); err != nil { if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
// Rename the rotated log back, so that we can try rotating it again // Rename the rotated log back, so that we can try rotating it again
// next round. // next round.
// If the kubelet gets restarted at this point, we'll lose the original log. // If the kubelet gets restarted at this point, we'll lose the original log.
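The delicate part of this file is rotateLatestLog's rename-then-reopen sequence: rename the live log, ask the runtime to reopen its log file, and roll the rename back on failure so the next pass can retry. A sketch, assuming a hypothetical one-method reopener:

```go
package logs // illustrative package name

import (
	"context"
	"fmt"
	"os"
)

// logReopener is a hypothetical one-method view of the CRI RuntimeService
// call used by rotateLatestLog above.
type logReopener interface {
	ReopenContainerLog(ctx context.Context, id string) error
}

func rotateLatestLog(ctx context.Context, r logReopener, id, log, rotated string) error {
	if err := os.Rename(log, rotated); err != nil {
		return fmt.Errorf("failed to rotate log %q to %q: %w", log, rotated, err)
	}
	if err := r.ReopenContainerLog(ctx, id); err != nil {
		// Roll the rename back so a later rotation can retry; as the comment
		// above notes, a kubelet restart between the renames loses the log.
		if rbErr := os.Rename(rotated, log); rbErr != nil {
			return fmt.Errorf("reopen failed (%v) and rollback failed: %w", err, rbErr)
		}
		return fmt.Errorf("failed to reopen container log %q: %w", id, err)
	}
	return nil
}
```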

View File

@ -16,11 +16,13 @@ limitations under the License.
package logs package logs
import "context"
type containerLogManagerStub struct{} type containerLogManagerStub struct{}
func (*containerLogManagerStub) Start() {} func (*containerLogManagerStub) Start() {}
func (*containerLogManagerStub) Clean(containerID string) error { func (*containerLogManagerStub) Clean(ctx context.Context, containerID string) error {
return nil return nil
} }
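Note: widening an interface method to take a context forces every implementation to change, including no-op stubs like the one above. A tiny sketch of that consequence (Manager and stub are illustrative names, not the real interface):

package main

import (
	"context"
	"fmt"
)

// Manager stands in for an interface whose Clean method gained a context.
type Manager interface {
	Clean(ctx context.Context, containerID string) error
}

// stub satisfies the widened interface but ignores ctx entirely, just as
// containerLogManagerStub does; the parameter exists only to keep the
// stub compiling against the new signature.
type stub struct{}

func (stub) Clean(ctx context.Context, containerID string) error { return nil }

func main() {
	var m Manager = stub{}
	fmt.Println(m.Clean(context.Background(), "c1"))
}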


@ -18,6 +18,7 @@ package logs
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -74,6 +75,7 @@ func TestGetAllLogs(t *testing.T) {
} }
func TestRotateLogs(t *testing.T) { func TestRotateLogs(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-rotate-logs") dir, err := os.MkdirTemp("", "test-rotate-logs")
require.NoError(t, err) require.NoError(t, err)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
@ -147,7 +149,7 @@ func TestRotateLogs(t *testing.T) {
}, },
} }
f.SetFakeContainers(testContainers) f.SetFakeContainers(testContainers)
require.NoError(t, c.rotateLogs()) require.NoError(t, c.rotateLogs(ctx))
timestamp := now.Format(timestampFormat) timestamp := now.Format(timestampFormat)
logs, err := os.ReadDir(dir) logs, err := os.ReadDir(dir)
@ -161,6 +163,7 @@ func TestRotateLogs(t *testing.T) {
} }
func TestClean(t *testing.T) { func TestClean(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-clean") dir, err := os.MkdirTemp("", "test-clean")
require.NoError(t, err) require.NoError(t, err)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
@ -219,7 +222,7 @@ func TestClean(t *testing.T) {
} }
f.SetFakeContainers(testContainers) f.SetFakeContainers(testContainers)
err = c.Clean("container-3") err = c.Clean(ctx, "container-3")
require.NoError(t, err) require.NoError(t, err)
logs, err := os.ReadDir(dir) logs, err := os.ReadDir(dir)
@ -350,6 +353,7 @@ func TestCompressLog(t *testing.T) {
} }
func TestRotateLatestLog(t *testing.T) { func TestRotateLatestLog(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-rotate-latest-log") dir, err := os.MkdirTemp("", "test-rotate-latest-log")
require.NoError(t, err) require.NoError(t, err)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
@ -393,7 +397,7 @@ func TestRotateLatestLog(t *testing.T) {
defer testFile.Close() defer testFile.Close()
testLog := testFile.Name() testLog := testFile.Name()
rotatedLog := fmt.Sprintf("%s.%s", testLog, now.Format(timestampFormat)) rotatedLog := fmt.Sprintf("%s.%s", testLog, now.Format(timestampFormat))
err = c.rotateLatestLog("test-id", testLog) err = c.rotateLatestLog(ctx, "test-id", testLog)
assert.Equal(t, test.expectError, err != nil) assert.Equal(t, test.expectError, err != nil)
_, err = os.Stat(testLog) _, err = os.Stat(testLog)
assert.Equal(t, test.expectOriginal, err == nil) assert.Equal(t, test.expectOriginal, err == nil)


@ -17,6 +17,8 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"k8s.io/component-base/metrics" "k8s.io/component-base/metrics"
"k8s.io/klog/v2" "k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@ -40,7 +42,7 @@ var (
type logMetricsCollector struct { type logMetricsCollector struct {
metrics.BaseStableCollector metrics.BaseStableCollector
podStats func() ([]statsapi.PodStats, error) podStats func(ctx context.Context) ([]statsapi.PodStats, error)
} }
// Check if logMetricsCollector implements necessary interface // Check if logMetricsCollector implements necessary interface
@ -48,7 +50,7 @@ var _ metrics.StableCollector = &logMetricsCollector{}
// NewLogMetricsCollector implements the metrics.StableCollector interface and // NewLogMetricsCollector implements the metrics.StableCollector interface and
// exposes metrics about container's log volume size. // exposes metrics about container's log volume size.
func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) metrics.StableCollector { func NewLogMetricsCollector(podStats func(ctx context.Context) ([]statsapi.PodStats, error)) metrics.StableCollector {
return &logMetricsCollector{ return &logMetricsCollector{
podStats: podStats, podStats: podStats,
} }
@ -61,7 +63,7 @@ func (c *logMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
// CollectWithStability implements the metrics.StableCollector interface. // CollectWithStability implements the metrics.StableCollector interface.
func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) { func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
podStats, err := c.podStats() podStats, err := c.podStats(context.Background())
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get pod stats") klog.ErrorS(err, "Failed to get pod stats")
return return
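Note: here the dependency is not an interface but a stored function value, so the field's type itself changes to accept a context, and the collector, whose Prometheus-facing method keeps a fixed context-free signature, creates context.Background() at that boundary. A minimal sketch under those assumptions (statsFunc and collector are hypothetical names):

package main

import (
	"context"
	"fmt"
)

// statsFunc mirrors the reworked podStats field: the injected dependency
// is now a context-aware function value.
type statsFunc func(ctx context.Context) ([]string, error)

type collector struct{ podStats statsFunc }

// Collect has a fixed, context-free signature (as Prometheus collector
// methods do), so the context is created at this boundary.
func (c *collector) Collect() {
	stats, err := c.podStats(context.Background())
	if err != nil {
		fmt.Println("failed to get pod stats:", err)
		return
	}
	fmt.Println("collected", len(stats), "pod stats")
}

func main() {
	c := &collector{podStats: func(_ context.Context) ([]string, error) {
		return []string{"pod-a"}, nil
	}}
	c.Collect()
}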


@ -17,6 +17,7 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"strings" "strings"
"testing" "testing"
@ -29,7 +30,7 @@ func TestNoMetricsCollected(t *testing.T) {
descLogSize = descLogSize.GetRawDesc() descLogSize = descLogSize.GetRawDesc()
collector := &logMetricsCollector{ collector := &logMetricsCollector{
podStats: func() ([]statsapi.PodStats, error) { podStats: func(_ context.Context) ([]statsapi.PodStats, error) {
return []statsapi.PodStats{}, nil return []statsapi.PodStats{}, nil
}, },
} }
@ -45,7 +46,7 @@ func TestMetricsCollected(t *testing.T) {
size := uint64(18) size := uint64(18)
collector := &logMetricsCollector{ collector := &logMetricsCollector{
podStats: func() ([]statsapi.PodStats, error) { podStats: func(_ context.Context) ([]statsapi.PodStats, error) {
return []statsapi.PodStats{ return []statsapi.PodStats{
{ {
PodRef: statsapi.PodReference{ PodRef: statsapi.PodReference{


@ -17,6 +17,7 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"time" "time"
"k8s.io/component-base/metrics" "k8s.io/component-base/metrics"
@ -116,11 +117,12 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *metrics.Des
// leak metric collectors for containers or pods that no longer exist. Instead, implement // leak metric collectors for containers or pods that no longer exist. Instead, implement
// a custom collector in a way that only collects metrics for active containers. // a custom collector in a way that only collects metrics for active containers.
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) { func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
ctx := context.Background()
var errorCount float64 var errorCount float64
defer func() { defer func() {
ch <- metrics.NewLazyConstMetric(resourceScrapeResultDesc, metrics.GaugeValue, errorCount) ch <- metrics.NewLazyConstMetric(resourceScrapeResultDesc, metrics.GaugeValue, errorCount)
}() }()
statsSummary, err := rc.provider.GetCPUAndMemoryStats() statsSummary, err := rc.provider.GetCPUAndMemoryStats(ctx)
if err != nil { if err != nil {
errorCount = 1 errorCount = 1
klog.ErrorS(err, "Error getting summary for resourceMetric prometheus endpoint") klog.ErrorS(err, "Error getting summary for resourceMetric prometheus endpoint")


@ -17,6 +17,7 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
@ -357,8 +358,9 @@ func TestCollectResourceMetrics(t *testing.T) {
for _, test := range tests { for _, test := range tests {
tc := test tc := test
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
provider := summaryprovidertest.NewMockSummaryProvider(mockCtrl) provider := summaryprovidertest.NewMockSummaryProvider(mockCtrl)
provider.EXPECT().GetCPUAndMemoryStats().Return(tc.summary, tc.summaryErr).AnyTimes() provider.EXPECT().GetCPUAndMemoryStats(ctx).Return(tc.summary, tc.summaryErr).AnyTimes()
collector := NewResourceMetricsCollector(provider) collector := NewResourceMetricsCollector(provider)
if err := testutil.CustomCollectAndCompare(collector, strings.NewReader(tc.expectedMetrics), interestedMetrics...); err != nil { if err := testutil.CustomCollectAndCompare(collector, strings.NewReader(tc.expectedMetrics), interestedMetrics...); err != nil {
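Note: the test can record its expectation with its own ctx even though the collector builds a context internally, because context.Background() yields equal values on every call, so gomock's default equality matching still lines up. A two-line check of that property:

package main

import (
	"context"
	"fmt"
)

func main() {
	// Background contexts compare equal, which is what lets an EXPECT()
	// recorded with the test's ctx match the one created in the collector.
	fmt.Println(context.Background() == context.Background()) // true
}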


@ -17,6 +17,8 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics" "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@ -96,7 +98,8 @@ func (collector *volumeStatsCollector) DescribeWithStability(ch chan<- *metrics.
// CollectWithStability implements the metrics.StableCollector interface. // CollectWithStability implements the metrics.StableCollector interface.
func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) { func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) {
podStats, err := collector.statsProvider.ListPodStats() ctx := context.Background()
podStats, err := collector.statsProvider.ListPodStats(ctx)
if err != nil { if err != nil {
return return
} }


@ -17,6 +17,7 @@ limitations under the License.
package collectors package collectors
import ( import (
"context"
"strings" "strings"
"testing" "testing"
@ -32,6 +33,7 @@ func newUint64Pointer(i uint64) *uint64 {
} }
func TestVolumeStatsCollector(t *testing.T) { func TestVolumeStatsCollector(t *testing.T) {
ctx := context.Background()
// Fixed metadata on type and help text. We prepend this to every expected // Fixed metadata on type and help text. We prepend this to every expected
// output so we only have to modify a single place when doing adjustments. // output so we only have to modify a single place when doing adjustments.
const metadata = ` const metadata = `
@ -144,14 +146,15 @@ func TestVolumeStatsCollector(t *testing.T) {
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes() mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes()
if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil { if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil {
t.Errorf("unexpected collecting result:\n%s", err) t.Errorf("unexpected collecting result:\n%s", err)
} }
} }
func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) { func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) {
ctx := context.Background()
// Fixed metadata on type and help text. We prepend this to every expected // Fixed metadata on type and help text. We prepend this to every expected
// output so we only have to modify a single place when doing adjustments. // output so we only have to modify a single place when doing adjustments.
const metadata = ` const metadata = `
@ -231,8 +234,8 @@ func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) {
defer mockCtrl.Finish() defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl) mockStatsProvider := statstest.NewMockProvider(mockCtrl)
mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes() mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes()
if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil { if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil {
t.Errorf("unexpected collecting result:\n%s", err) t.Errorf("unexpected collecting result:\n%s", err)
} }


@ -17,6 +17,7 @@ limitations under the License.
package nodestatus package nodestatus
import ( import (
"context"
"fmt" "fmt"
"math" "math"
"net" "net"
@ -54,7 +55,7 @@ const (
// Setter modifies the node in-place, and returns an error if the modification failed. // Setter modifies the node in-place, and returns an error if the modification failed.
// Setters may partially mutate the node before returning an error. // Setters may partially mutate the node before returning an error.
type Setter func(node *v1.Node) error type Setter func(ctx context.Context, node *v1.Node) error
// NodeAddress returns a Setter that updates address-related information on the node. // NodeAddress returns a Setter that updates address-related information on the node.
func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
@ -78,7 +79,7 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
} }
secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified() secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified()
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
if nodeIPSpecified { if nodeIPSpecified {
if err := validateNodeIPFunc(nodeIP); err != nil { if err := validateNodeIPFunc(nodeIP); err != nil {
return fmt.Errorf("failed to validate nodeIP: %v", err) return fmt.Errorf("failed to validate nodeIP: %v", err)
@ -250,7 +251,7 @@ func MachineInfo(nodeName string,
recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent
localStorageCapacityIsolation bool, localStorageCapacityIsolation bool,
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
// Note: avoid blindly overwriting the capacity in case opaque // Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised. // resources are being advertised.
if node.Status.Capacity == nil { if node.Status.Capacity == nil {
@ -379,9 +380,9 @@ func MachineInfo(nodeName string,
// VersionInfo returns a Setter that updates version-related information on the node. // VersionInfo returns a Setter that updates version-related information on the node.
func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo
runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type
runtimeVersionFunc func() (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version runtimeVersionFunc func(ctx context.Context) (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
verinfo, err := versionInfoFunc() verinfo, err := versionInfoFunc()
if err != nil { if err != nil {
return fmt.Errorf("error getting version info: %v", err) return fmt.Errorf("error getting version info: %v", err)
@ -391,7 +392,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), //
node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion
runtimeVersion := "Unknown" runtimeVersion := "Unknown"
if runtimeVer, err := runtimeVersionFunc(); err == nil { if runtimeVer, err := runtimeVersionFunc(ctx); err == nil {
runtimeVersion = runtimeVer.String() runtimeVersion = runtimeVer.String()
} }
node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion) node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion)
@ -405,7 +406,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), //
// DaemonEndpoints returns a Setter that updates the daemon endpoints on the node. // DaemonEndpoints returns a Setter that updates the daemon endpoints on the node.
func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter { func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
node.Status.DaemonEndpoints = *daemonEndpoints node.Status.DaemonEndpoints = *daemonEndpoints
return nil return nil
} }
@ -417,7 +418,7 @@ func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
func Images(nodeStatusMaxImages int32, func Images(nodeStatusMaxImages int32,
imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
// Update image list of this node // Update image list of this node
var imagesOnNode []v1.ContainerImage var imagesOnNode []v1.ContainerImage
containerImages, err := imageListFunc() containerImages, err := imageListFunc()
@ -452,7 +453,7 @@ func Images(nodeStatusMaxImages int32,
// GoRuntime returns a Setter that sets GOOS and GOARCH on the node. // GoRuntime returns a Setter that sets GOOS and GOARCH on the node.
func GoRuntime() Setter { func GoRuntime() Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
node.Status.NodeInfo.OperatingSystem = goruntime.GOOS node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
node.Status.NodeInfo.Architecture = goruntime.GOARCH node.Status.NodeInfo.Architecture = goruntime.GOARCH
return nil return nil
@ -471,7 +472,7 @@ func ReadyCondition(
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
localStorageCapacityIsolation bool, localStorageCapacityIsolation bool,
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions. // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
// This is due to an issue with version skewed kubelet and master components. // This is due to an issue with version skewed kubelet and master components.
// ref: https://github.com/kubernetes/kubernetes/issues/16961 // ref: https://github.com/kubernetes/kubernetes/issues/16961
@ -556,7 +557,7 @@ func MemoryPressureCondition(nowFunc func() time.Time, // typically Kubelet.cloc
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc()) currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition var condition *v1.NodeCondition
@ -617,7 +618,7 @@ func PIDPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.N
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc()) currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition var condition *v1.NodeCondition
@ -678,7 +679,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc()) currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition var condition *v1.NodeCondition
@ -738,7 +739,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.
func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
// Make sure to only update node status after reconciler starts syncing up states // Make sure to only update node status after reconciler starts syncing up states
if syncedFunc() { if syncedFunc() {
node.Status.VolumesInUse = volumesInUseFunc() node.Status.VolumesInUse = volumesInUseFunc()
@ -750,7 +751,7 @@ func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.Rec
// VolumeLimits returns a Setter that updates the volume limits on the node. // VolumeLimits returns a Setter that updates the volume limits on the node.
func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits, // typically Kubelet.volumePluginMgr.ListVolumePluginWithLimits func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits, // typically Kubelet.volumePluginMgr.ListVolumePluginWithLimits
) Setter { ) Setter {
return func(node *v1.Node) error { return func(ctx context.Context, node *v1.Node) error {
if node.Status.Capacity == nil { if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{} node.Status.Capacity = v1.ResourceList{}
} }
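Note: nodestatus moves the signature change one level up: the exported Setter function type itself gains a ctx parameter, so every closure returned by the constructors changes shape even when its body ignores ctx, and only the context-aware setters (like VersionInfo) actually forward it. A compact sketch of that design, with stand-in node/Setter types rather than the real v1.Node:

package main

import (
	"context"
	"fmt"
)

// node is a stand-in for v1.Node's status fields.
type node struct {
	OS             string
	RuntimeVersion string
}

// Setter mirrors the reworked nodestatus.Setter: every setter takes the
// caller's context, even when it does not use it yet.
type Setter func(ctx context.Context, n *node) error

// goRuntime ignores ctx; the uniform signature keeps all setters
// invocable from one loop.
func goRuntime() Setter {
	return func(ctx context.Context, n *node) error {
		n.OS = "linux"
		return nil
	}
}

// versionInfo forwards ctx to a context-aware dependency, as VersionInfo
// above now does with runtimeVersionFunc.
func versionInfo(versionFunc func(ctx context.Context) (string, error)) Setter {
	return func(ctx context.Context, n *node) error {
		v, err := versionFunc(ctx)
		if err != nil {
			return err
		}
		n.RuntimeVersion = v
		return nil
	}
}

func main() {
	n := &node{}
	ctx := context.Background()
	setters := []Setter{
		goRuntime(),
		versionInfo(func(ctx context.Context) (string, error) { return "fake://1.0", nil }),
	}
	for _, set := range setters {
		if err := set(ctx, n); err != nil {
			fmt.Println("setter failed:", err)
		}
	}
	fmt.Printf("%+v\n", *n)
}

Keeping the parameter uniform across all setters lets the caller invoke them from one loop without caring which ones use the context yet.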


@ -17,6 +17,7 @@ limitations under the License.
package nodestatus package nodestatus
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net" "net"
@ -512,6 +513,7 @@ func TestNodeAddress(t *testing.T) {
} }
for _, testCase := range cases { for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) { t.Run(testCase.name, func(t *testing.T) {
ctx := context.Background()
// testCase setup // testCase setup
existingNode := &v1.Node{ existingNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -553,7 +555,7 @@ func TestNodeAddress(t *testing.T) {
nodeAddressesFunc) nodeAddressesFunc)
// call setter on existing node // call setter on existing node
err := setter(existingNode) err := setter(ctx, existingNode)
if err != nil && !testCase.shouldError { if err != nil && !testCase.shouldError {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} else if err != nil && testCase.shouldError { } else if err != nil && testCase.shouldError {
@ -598,6 +600,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
} }
for _, testCase := range cases { for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) { t.Run(testCase.name, func(t *testing.T) {
ctx := context.Background()
// testCase setup // testCase setup
existingNode := &v1.Node{ existingNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)}, ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
@ -624,7 +627,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
nodeAddressesFunc) nodeAddressesFunc)
// call setter on existing node // call setter on existing node
err := setter(existingNode) err := setter(ctx, existingNode)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -1049,6 +1052,7 @@ func TestMachineInfo(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
machineInfoFunc := func() (*cadvisorapiv1.MachineInfo, error) { machineInfoFunc := func() (*cadvisorapiv1.MachineInfo, error) {
return tc.machineInfo, tc.machineInfoError return tc.machineInfo, tc.machineInfoError
} }
@ -1075,7 +1079,7 @@ func TestMachineInfo(t *testing.T) {
setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc, setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc,
devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc, tc.disableLocalStorageCapacityIsolation) devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc, tc.disableLocalStorageCapacityIsolation)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected node // check expected node
@ -1153,19 +1157,20 @@ func TestVersionInfo(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
versionInfoFunc := func() (*cadvisorapiv1.VersionInfo, error) { versionInfoFunc := func() (*cadvisorapiv1.VersionInfo, error) {
return tc.versionInfo, tc.versionInfoError return tc.versionInfo, tc.versionInfoError
} }
runtimeTypeFunc := func() string { runtimeTypeFunc := func() string {
return tc.runtimeType return tc.runtimeType
} }
runtimeVersionFunc := func() (kubecontainer.Version, error) { runtimeVersionFunc := func(_ context.Context) (kubecontainer.Version, error) {
return tc.runtimeVersion, tc.runtimeVersionError return tc.runtimeVersion, tc.runtimeVersionError
} }
// construct setter // construct setter
setter := VersionInfo(versionInfoFunc, runtimeTypeFunc, runtimeVersionFunc) setter := VersionInfo(versionInfoFunc, runtimeTypeFunc, runtimeVersionFunc)
// call setter on node // call setter on node
err := setter(tc.node) err := setter(ctx, tc.node)
require.Equal(t, tc.expectError, err) require.Equal(t, tc.expectError, err)
// check expected node // check expected node
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.node), assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.node),
@ -1229,6 +1234,7 @@ func TestImages(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
imageListFunc := func() ([]kubecontainer.Image, error) { imageListFunc := func() ([]kubecontainer.Image, error) {
// today, imageListFunc is expected to return a sorted list, // today, imageListFunc is expected to return a sorted list,
// but we may choose to sort in the setter at some future point // but we may choose to sort in the setter at some future point
@ -1240,7 +1246,7 @@ func TestImages(t *testing.T) {
setter := Images(tc.maxImages, imageListFunc) setter := Images(tc.maxImages, imageListFunc)
// call setter on node // call setter on node
node := &v1.Node{} node := &v1.Node{}
err := setter(node) err := setter(ctx, node)
require.Equal(t, tc.expectError, err) require.Equal(t, tc.expectError, err)
// check expected node, image list should be reset to empty when there is an error // check expected node, image list should be reset to empty when there is an error
expectNode := &v1.Node{} expectNode := &v1.Node{}
@ -1408,6 +1414,7 @@ func TestReadyCondition(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
runtimeErrorsFunc := func() error { runtimeErrorsFunc := func() error {
return tc.runtimeErrors return tc.runtimeErrors
} }
@ -1433,7 +1440,7 @@ func TestReadyCondition(t *testing.T) {
// construct setter // construct setter
setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation) setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected condition // check expected condition
@ -1541,6 +1548,7 @@ func TestMemoryPressureCondition(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
events := []testEvent{} events := []testEvent{}
recordEventFunc := func(eventType, event string) { recordEventFunc := func(eventType, event string) {
events = append(events, testEvent{ events = append(events, testEvent{
@ -1554,7 +1562,7 @@ func TestMemoryPressureCondition(t *testing.T) {
// construct setter // construct setter
setter := MemoryPressureCondition(nowFunc, pressureFunc, recordEventFunc) setter := MemoryPressureCondition(nowFunc, pressureFunc, recordEventFunc)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected condition // check expected condition
@ -1662,6 +1670,7 @@ func TestPIDPressureCondition(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
events := []testEvent{} events := []testEvent{}
recordEventFunc := func(eventType, event string) { recordEventFunc := func(eventType, event string) {
events = append(events, testEvent{ events = append(events, testEvent{
@ -1675,7 +1684,7 @@ func TestPIDPressureCondition(t *testing.T) {
// construct setter // construct setter
setter := PIDPressureCondition(nowFunc, pressureFunc, recordEventFunc) setter := PIDPressureCondition(nowFunc, pressureFunc, recordEventFunc)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected condition // check expected condition
@ -1783,6 +1792,7 @@ func TestDiskPressureCondition(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
events := []testEvent{} events := []testEvent{}
recordEventFunc := func(eventType, event string) { recordEventFunc := func(eventType, event string) {
events = append(events, testEvent{ events = append(events, testEvent{
@ -1796,7 +1806,7 @@ func TestDiskPressureCondition(t *testing.T) {
// construct setter // construct setter
setter := DiskPressureCondition(nowFunc, pressureFunc, recordEventFunc) setter := DiskPressureCondition(nowFunc, pressureFunc, recordEventFunc)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected condition // check expected condition
@ -1843,6 +1853,7 @@ func TestVolumesInUse(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
syncedFunc := func() bool { syncedFunc := func() bool {
return tc.synced return tc.synced
} }
@ -1852,7 +1863,7 @@ func TestVolumesInUse(t *testing.T) {
// construct setter // construct setter
setter := VolumesInUse(syncedFunc, volumesInUseFunc) setter := VolumesInUse(syncedFunc, volumesInUseFunc)
// call setter on node // call setter on node
if err := setter(tc.node); err != nil { if err := setter(ctx, tc.node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected volumes // check expected volumes
@ -1908,6 +1919,7 @@ func TestVolumeLimits(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
volumePluginListFunc := func() []volume.VolumePluginWithAttachLimits { volumePluginListFunc := func() []volume.VolumePluginWithAttachLimits {
return tc.volumePluginList return tc.volumePluginList
} }
@ -1915,7 +1927,7 @@ func TestVolumeLimits(t *testing.T) {
setter := VolumeLimits(volumePluginListFunc) setter := VolumeLimits(volumePluginListFunc)
// call setter on node // call setter on node
node := &v1.Node{} node := &v1.Node{}
if err := setter(node); err != nil { if err := setter(ctx, node); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check expected node // check expected node


@ -17,6 +17,7 @@ limitations under the License.
package pleg package pleg
import ( import (
"context"
"fmt" "fmt"
"sync/atomic" "sync/atomic"
"time" "time"
@ -188,6 +189,7 @@ func (g *GenericPLEG) updateRelistTime(timestamp time.Time) {
// relist queries the container runtime for the list of pods/containers, compares them // relist queries the container runtime for the list of pods/containers, compares them
// with the internal pods/containers, and generates events accordingly. // with the internal pods/containers, and generates events accordingly.
func (g *GenericPLEG) relist() { func (g *GenericPLEG) relist() {
ctx := context.Background()
klog.V(5).InfoS("GenericPLEG: Relisting") klog.V(5).InfoS("GenericPLEG: Relisting")
if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() { if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
@ -200,7 +202,7 @@ func (g *GenericPLEG) relist() {
}() }()
// Get all the pods. // Get all the pods.
podList, err := g.runtime.GetPods(true) podList, err := g.runtime.GetPods(ctx, true)
if err != nil { if err != nil {
klog.ErrorS(err, "GenericPLEG: Unable to retrieve pods") klog.ErrorS(err, "GenericPLEG: Unable to retrieve pods")
return return
@ -247,7 +249,7 @@ func (g *GenericPLEG) relist() {
// inspecting the pod and getting the PodStatus to update the cache // inspecting the pod and getting the PodStatus to update the cache
// serially may take a while. We should be aware of this and // serially may take a while. We should be aware of this and
// parallelize if needed. // parallelize if needed.
if err := g.updateCache(pod, pid); err != nil { if err := g.updateCache(ctx, pod, pid); err != nil {
// Rely on updateCache calling GetPodStatus to log the actual error. // Rely on updateCache calling GetPodStatus to log the actual error.
klog.V(4).ErrorS(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name)) klog.V(4).ErrorS(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name))
@ -305,7 +307,7 @@ func (g *GenericPLEG) relist() {
if len(g.podsToReinspect) > 0 { if len(g.podsToReinspect) > 0 {
klog.V(5).InfoS("GenericPLEG: Reinspecting pods that previously failed inspection") klog.V(5).InfoS("GenericPLEG: Reinspecting pods that previously failed inspection")
for pid, pod := range g.podsToReinspect { for pid, pod := range g.podsToReinspect {
if err := g.updateCache(pod, pid); err != nil { if err := g.updateCache(ctx, pod, pid); err != nil {
// Rely on updateCache calling GetPodStatus to log the actual error. // Rely on updateCache calling GetPodStatus to log the actual error.
klog.V(5).ErrorS(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name)) klog.V(5).ErrorS(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name))
needsReinspection[pid] = pod needsReinspection[pid] = pod
@ -388,7 +390,7 @@ func (g *GenericPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus)
return oldStatus.IPs return oldStatus.IPs
} }
func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, pid types.UID) error {
if pod == nil { if pod == nil {
// The pod is missing in the current relist. This means that // The pod is missing in the current relist. This means that
// the pod has no visible (active or inactive) containers. // the pod has no visible (active or inactive) containers.
@ -400,7 +402,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
// TODO: Consider adding a new runtime method // TODO: Consider adding a new runtime method
// GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing
// all containers again. // all containers again.
status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) status, err := g.runtime.GetPodStatus(ctx, pod.ID, pod.Name, pod.Namespace)
if err != nil { if err != nil {
// nolint:logcheck // Not using the result of klog.V inside the // nolint:logcheck // Not using the result of klog.V inside the
// if branch is okay, we just use it to determine whether the // if branch is okay, we just use it to determine whether the
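Note: relist is driven by a ticker rather than a caller, so there is no inbound context to thread; one Background context is created per relist pass and shared by every runtime call the pass makes (GetPods, updateCache, GetPodStatus). A self-contained sketch of that per-pass shape, with trimmed-down hypothetical types:

package main

import (
	"context"
	"fmt"
)

type runtime interface {
	GetPods(ctx context.Context, all bool) ([]string, error)
	GetPodStatus(ctx context.Context, id string) (string, error)
}

type fakeRuntime struct{}

func (fakeRuntime) GetPods(ctx context.Context, all bool) ([]string, error) {
	return []string{"pod-a"}, nil
}

func (fakeRuntime) GetPodStatus(ctx context.Context, id string) (string, error) {
	return "running", nil
}

type pleg struct{ rt runtime }

// relist has no caller-supplied context, so it creates one per pass and
// reuses it for every runtime call made during that pass.
func (g *pleg) relist() {
	ctx := context.Background()
	pods, err := g.rt.GetPods(ctx, true)
	if err != nil {
		return
	}
	for _, p := range pods {
		if s, err := g.rt.GetPodStatus(ctx, p); err == nil {
			fmt.Println(p, s)
		}
	}
}

func main() { (&pleg{rt: fakeRuntime{}}).relist() }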


@ -17,6 +17,7 @@ limitations under the License.
package pleg package pleg
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
@ -350,6 +351,7 @@ func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecont
} }
func TestRelistWithCache(t *testing.T) { func TestRelistWithCache(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
runtimeMock := containertest.NewMockRuntime(mockCtrl) runtimeMock := containertest.NewMockRuntime(mockCtrl)
@ -358,11 +360,11 @@ func TestRelistWithCache(t *testing.T) {
ch := pleg.Watch() ch := pleg.Watch()
pods, statuses, events := createTestPodsStatusesAndEvents(2) pods, statuses, events := createTestPodsStatusesAndEvents(2)
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).AnyTimes() runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).AnyTimes()
runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
// Inject an error when querying runtime for the pod status for pods[1]. // Inject an error when querying runtime for the pod status for pods[1].
statusErr := fmt.Errorf("unable to get status") statusErr := fmt.Errorf("unable to get status")
runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1)
pleg.relist() pleg.relist()
actualEvents := getEventsFromChannel(ch) actualEvents := getEventsFromChannel(ch)
@ -384,7 +386,7 @@ func TestRelistWithCache(t *testing.T) {
assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents) assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents)
// Return normal status for pods[1]. // Return normal status for pods[1].
runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(statuses[1], nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(statuses[1], nil).Times(1)
pleg.relist() pleg.relist()
actualEvents = getEventsFromChannel(ch) actualEvents = getEventsFromChannel(ch)
cases = []struct { cases = []struct {
@ -406,19 +408,20 @@ func TestRelistWithCache(t *testing.T) {
} }
func TestRemoveCacheEntry(t *testing.T) { func TestRemoveCacheEntry(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
runtimeMock := containertest.NewMockRuntime(mockCtrl) runtimeMock := containertest.NewMockRuntime(mockCtrl)
pleg := newTestGenericPLEGWithRuntimeMock(runtimeMock) pleg := newTestGenericPLEGWithRuntimeMock(runtimeMock)
pods, statuses, _ := createTestPodsStatusesAndEvents(1) pods, statuses, _ := createTestPodsStatusesAndEvents(1)
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
// Does a relist to populate the cache. // Does a relist to populate the cache.
pleg.relist() pleg.relist()
// Delete the pod from runtime. Verify that the cache entry has been // Delete the pod from runtime. Verify that the cache entry has been
// removed after relisting. // removed after relisting.
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{}, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{}, nil).Times(1)
pleg.relist() pleg.relist()
actualStatus, actualErr := pleg.cache.Get(pods[0].ID) actualStatus, actualErr := pleg.cache.Get(pods[0].ID)
assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus) assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus)
@ -453,6 +456,7 @@ func TestHealthy(t *testing.T) {
} }
func TestRelistWithReinspection(t *testing.T) { func TestRelistWithReinspection(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
runtimeMock := containertest.NewMockRuntime(mockCtrl) runtimeMock := containertest.NewMockRuntime(mockCtrl)
@ -467,13 +471,13 @@ func TestRelistWithReinspection(t *testing.T) {
ID: podID, ID: podID,
Containers: []*kubecontainer.Container{infraContainer}, Containers: []*kubecontainer.Container{infraContainer},
}} }}
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
goodStatus := &kubecontainer.PodStatus{ goodStatus := &kubecontainer.PodStatus{
ID: podID, ID: podID,
ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}}, ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}},
} }
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1)
goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID} goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID}
@ -492,13 +496,13 @@ func TestRelistWithReinspection(t *testing.T) {
ID: podID, ID: podID,
Containers: []*kubecontainer.Container{infraContainer, transientContainer}, Containers: []*kubecontainer.Container{infraContainer, transientContainer},
}} }}
runtimeMock.EXPECT().GetPods(true).Return(podsWithTransientContainer, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return(podsWithTransientContainer, nil).Times(1)
badStatus := &kubecontainer.PodStatus{ badStatus := &kubecontainer.PodStatus{
ID: podID, ID: podID,
ContainerStatuses: []*kubecontainer.Status{}, ContainerStatuses: []*kubecontainer.Status{},
} }
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1)
pleg.relist() pleg.relist()
actualEvents = getEventsFromChannel(ch) actualEvents = getEventsFromChannel(ch)
@ -509,8 +513,8 @@ func TestRelistWithReinspection(t *testing.T) {
// listing 3 - pretend the transient container has now disappeared, leaving just the infra // listing 3 - pretend the transient container has now disappeared, leaving just the infra
// container. Make sure the pod is reinspected for its status and the cache is updated. // container. Make sure the pod is reinspected for its status and the cache is updated.
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1)
pleg.relist() pleg.relist()
actualEvents = getEventsFromChannel(ch) actualEvents = getEventsFromChannel(ch)
@ -591,6 +595,7 @@ func TestRelistingWithSandboxes(t *testing.T) {
} }
func TestRelistIPChange(t *testing.T) { func TestRelistIPChange(t *testing.T) {
ctx := context.Background()
testCases := []struct { testCases := []struct {
name string name string
podID string podID string
@ -631,8 +636,8 @@ func TestRelistIPChange(t *testing.T) {
} }
event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID} event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1)
pleg.relist() pleg.relist()
actualEvents := getEventsFromChannel(ch) actualEvents := getEventsFromChannel(ch)
@ -652,8 +657,8 @@ func TestRelistIPChange(t *testing.T) {
ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}}, ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
} }
event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID} event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1) runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1)
pleg.relist() pleg.relist()
actualEvents = getEventsFromChannel(ch) actualEvents = getEventsFromChannel(ch)


@ -17,6 +17,7 @@ limitations under the License.
package kubelet package kubelet
import ( import (
"context"
"sort" "sort"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
@ -48,7 +49,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
go wait.Until(func() { go wait.Until(func() {
for { for {
id := <-buffer id := <-buffer
if err := runtime.DeleteContainer(id); err != nil { if err := runtime.DeleteContainer(context.Background(), id); err != nil {
klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err) klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err)
} }
} }


@ -17,6 +17,7 @@ limitations under the License.
package prober package prober
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"time" "time"
@ -80,7 +81,7 @@ func (pb *prober) recordContainerEvent(pod *v1.Pod, container *v1.Container, eve
} }
// probe probes the container. // probe probes the container.
func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) { func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
var probeSpec *v1.Probe var probeSpec *v1.Probe
switch probeType { switch probeType {
case readiness: case readiness:
@ -98,7 +99,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
return results.Success, nil return results.Success, nil
} }
result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) result, output, err := pb.runProbeWithRetries(ctx, probeType, probeSpec, pod, status, container, containerID, maxProbeRetries)
if err != nil || (result != probe.Success && result != probe.Warning) { if err != nil || (result != probe.Success && result != probe.Warning) {
// Probe failed in one way or another. // Probe failed in one way or another.
if err != nil { if err != nil {
@ -121,12 +122,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
// runProbeWithRetries tries to probe the container in a finite loop; it returns the last result // runProbeWithRetries tries to probe the container in a finite loop; it returns the last result
// if it never succeeds. // if it never succeeds.
func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { func (pb *prober) runProbeWithRetries(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
var err error var err error
var result probe.Result var result probe.Result
var output string var output string
for i := 0; i < retries; i++ { for i := 0; i < retries; i++ {
result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID) result, output, err = pb.runProbe(ctx, probeType, p, pod, status, container, containerID)
if err == nil { if err == nil {
return result, output, nil return result, output, nil
} }
@ -134,12 +135,12 @@ func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.
return result, output, err return result, output, err
} }
func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { func (pb *prober) runProbe(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil { if p.Exec != nil {
klog.V(4).InfoS("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command) klog.V(4).InfoS("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command)
command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env) command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout)) return pb.exec.Probe(pb.newExecInContainer(ctx, container, containerID, command, timeout))
} }
if p.HTTPGet != nil { if p.HTTPGet != nil {
req, err := httpprobe.NewRequestForHTTPGetAction(p.HTTPGet, &container, status.PodIP, "probe") req, err := httpprobe.NewRequestForHTTPGetAction(p.HTTPGet, &container, status.PodIP, "probe")
@ -187,9 +188,9 @@ type execInContainer struct {
writer io.Writer writer io.Writer
} }
func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { func (pb *prober) newExecInContainer(ctx context.Context, container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
return &execInContainer{run: func() ([]byte, error) { return &execInContainer{run: func() ([]byte, error) {
return pb.runner.RunInContainer(containerID, cmd, timeout) return pb.runner.RunInContainer(ctx, containerID, cmd, timeout)
}} }}
} }
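Note: the prober threads ctx through probe → runProbeWithRetries → runProbe, and at the end captures it in a closure: newExecInContainer stores a func that carries ctx to whenever the exec probe actually runs. A minimal sketch of that capture, with illustrative runner/execCmd types:

package main

import (
	"context"
	"fmt"
	"time"
)

type runner interface {
	RunInContainer(ctx context.Context, id string, cmd []string, timeout time.Duration) ([]byte, error)
}

type fakeRunner struct{}

func (fakeRunner) RunInContainer(ctx context.Context, id string, cmd []string, timeout time.Duration) ([]byte, error) {
	return []byte("ok"), nil
}

// execCmd captures the context in a closure, mirroring how
// newExecInContainer above wraps RunInContainer; the stored func carries
// ctx to the point where the probe is actually executed.
type execCmd struct{ run func() ([]byte, error) }

func newExecCmd(ctx context.Context, r runner, id string, cmd []string) execCmd {
	return execCmd{run: func() ([]byte, error) {
		return r.RunInContainer(ctx, id, cmd, time.Second)
	}}
}

func main() {
	c := newExecCmd(context.Background(), fakeRunner{}, "c1", []string{"/bin/true"})
	out, _ := c.run()
	fmt.Println(string(out))
}

One consequence of this design, worth keeping in mind: cancelling the captured context after the command is built would affect every later invocation of the stored func.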


@ -18,6 +18,7 @@ package prober
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
@ -132,6 +133,7 @@ func TestGetTCPAddrParts(t *testing.T) {
} }
func TestProbe(t *testing.T) { func TestProbe(t *testing.T) {
ctx := context.Background()
containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"} containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}
execProbe := &v1.Probe{ execProbe := &v1.Probe{
@ -234,7 +236,7 @@ func TestProbe(t *testing.T) {
prober.exec = fakeExecProber{test.execResult, nil} prober.exec = fakeExecProber{test.execResult, nil}
} }
result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) result, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
if test.expectError && err == nil { if test.expectError && err == nil {
t.Errorf("[%s] Expected probe error but no error was returned.", testID) t.Errorf("[%s] Expected probe error but no error was returned.", testID)
} }
@ -248,7 +250,7 @@ func TestProbe(t *testing.T) {
if len(test.expectCommand) > 0 { if len(test.expectCommand) > 0 {
prober.exec = execprobe.New() prober.exec = execprobe.New()
prober.runner = &containertest.FakeContainerCommandRunner{} prober.runner = &containertest.FakeContainerCommandRunner{}
_, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) _, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
if err != nil { if err != nil {
t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err) t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
continue continue
@ -262,6 +264,7 @@ func TestProbe(t *testing.T) {
} }
func TestNewExecInContainer(t *testing.T) { func TestNewExecInContainer(t *testing.T) {
ctx := context.Background()
limit := 1024 limit := 1024
tenKilobyte := strings.Repeat("logs-123", 128*10) tenKilobyte := strings.Repeat("logs-123", 128*10)
@ -303,7 +306,7 @@ func TestNewExecInContainer(t *testing.T) {
container := v1.Container{} container := v1.Container{}
containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"} containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"}
cmd := []string{"/foo", "bar"} cmd := []string{"/foo", "bar"}
exec := prober.newExecInContainer(container, containerID, cmd, 0) exec := prober.newExecInContainer(ctx, container, containerID, cmd, 0)
var dataBuffer bytes.Buffer var dataBuffer bytes.Buffer
writer := ioutils.LimitWriter(&dataBuffer, int64(limit)) writer := ioutils.LimitWriter(&dataBuffer, int64(limit))


@ -17,6 +17,7 @@ limitations under the License.
package prober package prober
import ( import (
"context"
"fmt" "fmt"
"math/rand" "math/rand"
"strings" "strings"
@ -148,6 +149,7 @@ func newWorker(
// run periodically probes the container. // run periodically probes the container.
func (w *worker) run() { func (w *worker) run() {
ctx := context.Background()
probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
// If kubelet restarted the probes could be started in rapid succession. // If kubelet restarted the probes could be started in rapid succession.
@ -175,7 +177,7 @@ func (w *worker) run() {
}() }()
probeLoop: probeLoop:
for w.doProbe() { for w.doProbe(ctx) {
// Wait for next probe tick. // Wait for next probe tick.
select { select {
case <-w.stopCh: case <-w.stopCh:
@ -198,7 +200,7 @@ func (w *worker) stop() {
// doProbe probes the container once and records the result. // doProbe probes the container once and records the result.
// Returns whether the worker should continue. // Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) { func (w *worker) doProbe(ctx context.Context) (keepGoing bool) {
defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging) defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true }) defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })
@ -284,7 +286,7 @@ func (w *worker) doProbe() (keepGoing bool) {
} }
// Note, exec probe does NOT have access to pod environment variables or downward API // Note, exec probe does NOT have access to pod environment variables or downward API
result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID) result, err := w.probeManager.prober.probe(ctx, w.probeType, w.pod, status, w.container, w.containerID)
if err != nil { if err != nil {
// Prober error, throw away the result. // Prober error, throw away the result.
return true return true
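A compact sketch of the worker change, under simplified, assumed types: run() owns the goroutine's root context and hands it to every doProbe iteration, so doProbe no longer reaches for context.Background() on its own.

package main

import (
	"context"
	"fmt"
	"time"
)

type worker struct {
	stopCh    chan struct{}
	remaining int
}

// run owns the worker's root context; each iteration receives it.
func (w *worker) run() {
	ctx := context.Background()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for w.doProbe(ctx) {
		select {
		case <-w.stopCh:
			return
		case <-ticker.C:
		}
	}
}

// doProbe accepts the caller's context instead of creating its own.
func (w *worker) doProbe(ctx context.Context) (keepGoing bool) {
	_ = ctx // a real worker would thread this into prober.probe
	w.remaining--
	fmt.Println("probes left:", w.remaining)
	return w.remaining > 0
}

func main() {
	(&worker{stopCh: make(chan struct{}), remaining: 3}).run()
}

Keeping the context a parameter (rather than a field on worker) matches Go convention and leaves room for callers to supply per-request deadlines later.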


@ -17,6 +17,7 @@ limitations under the License.
package prober package prober
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
@ -130,6 +131,7 @@ func TestDoProbe(t *testing.T) {
} }
for i, test := range tests { for i, test := range tests {
ctx := context.Background()
w := newTestWorker(m, probeType, test.probe) w := newTestWorker(m, probeType, test.probe)
if test.podStatus != nil { if test.podStatus != nil {
m.statusManager.SetPodStatus(w.pod, *test.podStatus) m.statusManager.SetPodStatus(w.pod, *test.podStatus)
@ -138,7 +140,7 @@ func TestDoProbe(t *testing.T) {
now := metav1.Now() now := metav1.Now()
w.pod.ObjectMeta.DeletionTimestamp = &now w.pod.ObjectMeta.DeletionTimestamp = &now
} }
if c := w.doProbe(); c != test.expectContinue[probeType.String()] { if c := w.doProbe(ctx); c != test.expectContinue[probeType.String()] {
t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue[probeType.String()], c) t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue[probeType.String()], c)
} }
result, ok := resultsManager(m, probeType).Get(testContainerID) result, ok := resultsManager(m, probeType).Get(testContainerID)
@ -157,6 +159,7 @@ func TestDoProbe(t *testing.T) {
} }
func TestInitialDelay(t *testing.T) { func TestInitialDelay(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
for _, probeType := range [...]probeType{liveness, readiness, startup} { for _, probeType := range [...]probeType{liveness, readiness, startup} {
@ -165,7 +168,7 @@ func TestInitialDelay(t *testing.T) {
}) })
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(probeType != startup)) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(probeType != startup))
expectContinue(t, w, w.doProbe(), "during initial delay") expectContinue(t, w, w.doProbe(ctx), "during initial delay")
// Default value depends on probe, Success for liveness, Failure for readiness, Unknown for startup // Default value depends on probe, Success for liveness, Failure for readiness, Unknown for startup
switch probeType { switch probeType {
case liveness: case liveness:
@ -183,12 +186,13 @@ func TestInitialDelay(t *testing.T) {
m.statusManager.SetPodStatus(w.pod, laterStatus) m.statusManager.SetPodStatus(w.pod, laterStatus)
// Second call should succeed (already waited). // Second call should succeed (already waited).
expectContinue(t, w, w.doProbe(), "after initial delay") expectContinue(t, w, w.doProbe(ctx), "after initial delay")
expectResult(t, w, results.Success, "after initial delay") expectResult(t, w, results.Success, "after initial delay")
} }
} }
func TestFailureThreshold(t *testing.T) { func TestFailureThreshold(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
@ -199,7 +203,7 @@ func TestFailureThreshold(t *testing.T) {
for j := 0; j < 3; j++ { for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d success (%d)", j+1, i) msg := fmt.Sprintf("%d success (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
} }
@ -209,20 +213,21 @@ func TestFailureThreshold(t *testing.T) {
// Next 2 probes should still be "success". // Next 2 probes should still be "success".
for j := 0; j < 2; j++ { for j := 0; j < 2; j++ {
msg := fmt.Sprintf("%d failing (%d)", j+1, i) msg := fmt.Sprintf("%d failing (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
} }
// Third & following fail. // Third & following fail.
for j := 0; j < 3; j++ { for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d failure (%d)", j+3, i) msg := fmt.Sprintf("%d failure (%d)", j+3, i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
} }
} }
} }
func TestSuccessThreshold(t *testing.T) { func TestSuccessThreshold(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1}) w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
@ -234,21 +239,21 @@ func TestSuccessThreshold(t *testing.T) {
// Probe defaults to Failure. // Probe defaults to Failure.
for j := 0; j < 2; j++ { for j := 0; j < 2; j++ {
msg := fmt.Sprintf("%d success (%d)", j+1, i) msg := fmt.Sprintf("%d success (%d)", j+1, i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
} }
// Continuing success! // Continuing success!
for j := 0; j < 3; j++ { for j := 0; j < 3; j++ {
msg := fmt.Sprintf("%d success (%d)", j+3, i) msg := fmt.Sprintf("%d success (%d)", j+3, i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
} }
// Prober flakes :( // Prober flakes :(
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := fmt.Sprintf("1 failure (%d)", i) msg := fmt.Sprintf("1 failure (%d)", i)
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
// Back to success. // Back to success.
@ -322,6 +327,7 @@ func resultsManager(m *manager, probeType probeType) results.Manager {
} }
func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
for _, probeType := range [...]probeType{liveness, startup} { for _, probeType := range [...]probeType{liveness, startup} {
@ -332,7 +338,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
// First probe should fail. // First probe should fail.
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := "first probe" msg := "first probe"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
if !w.onHold { if !w.onHold {
t.Errorf("Prober should be on hold due to %s check failure", probeType) t.Errorf("Prober should be on hold due to %s check failure", probeType)
@ -341,7 +347,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
// failure because the worker is on hold and won't probe. // failure because the worker is on hold and won't probe.
m.prober.exec = fakeExecProber{probe.Success, nil} m.prober.exec = fakeExecProber{probe.Success, nil}
msg = "while on hold" msg = "while on hold"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
if !w.onHold { if !w.onHold {
t.Errorf("Prober should be on hold due to %s check failure", probeType) t.Errorf("Prober should be on hold due to %s check failure", probeType)
@ -351,7 +357,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
status.ContainerStatuses[0].ContainerID = "test://newCont_ID" status.ContainerStatuses[0].ContainerID = "test://newCont_ID"
m.statusManager.SetPodStatus(w.pod, status) m.statusManager.SetPodStatus(w.pod, status)
msg = "hold lifted" msg = "hold lifted"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
if w.onHold { if w.onHold {
t.Errorf("Prober should not be on hold anymore") t.Errorf("Prober should not be on hold anymore")
@ -360,13 +366,14 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
} }
func TestResultRunOnLivenessCheckFailure(t *testing.T) { func TestResultRunOnLivenessCheckFailure(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
m.prober.exec = fakeExecProber{probe.Success, nil} m.prober.exec = fakeExecProber{probe.Success, nil}
msg := "initial probe success" msg := "initial probe success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
if w.resultRun != 1 { if w.resultRun != 1 {
t.Errorf("Prober resultRun should be 1") t.Errorf("Prober resultRun should be 1")
@ -374,7 +381,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "probe failure, result success" msg = "probe failure, result success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
if w.resultRun != 1 { if w.resultRun != 1 {
t.Errorf("Prober resultRun should be 1") t.Errorf("Prober resultRun should be 1")
@ -382,7 +389,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "2nd probe failure, result success" msg = "2nd probe failure, result success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
if w.resultRun != 2 { if w.resultRun != 2 {
t.Errorf("Prober resultRun should be 2") t.Errorf("Prober resultRun should be 2")
@ -393,7 +400,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
// also gets FailureThreshold attempts to succeed. // also gets FailureThreshold attempts to succeed.
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "3rd probe failure, result failure" msg = "3rd probe failure, result failure"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
if w.resultRun != 0 { if w.resultRun != 0 {
t.Errorf("Prober resultRun should be reset to 0") t.Errorf("Prober resultRun should be reset to 0")
@ -401,6 +408,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
} }
func TestResultRunOnStartupCheckFailure(t *testing.T) { func TestResultRunOnStartupCheckFailure(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
@ -409,7 +417,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
// which is failed for startup at first. // which is failed for startup at first.
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := "probe failure, result unknown" msg := "probe failure, result unknown"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Unknown, msg) expectResult(t, w, results.Unknown, msg)
if w.resultRun != 1 { if w.resultRun != 1 {
t.Errorf("Prober resultRun should be 1") t.Errorf("Prober resultRun should be 1")
@ -417,7 +425,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "2nd probe failure, result unknown" msg = "2nd probe failure, result unknown"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Unknown, msg) expectResult(t, w, results.Unknown, msg)
if w.resultRun != 2 { if w.resultRun != 2 {
t.Errorf("Prober resultRun should be 2") t.Errorf("Prober resultRun should be 2")
@ -428,7 +436,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
// also gets FailureThreshold attempts to succeed. // also gets FailureThreshold attempts to succeed.
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "3rd probe failure, result failure" msg = "3rd probe failure, result failure"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
if w.resultRun != 0 { if w.resultRun != 0 {
t.Errorf("Prober resultRun should be reset to 0") t.Errorf("Prober resultRun should be reset to 0")
@ -436,43 +444,45 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
} }
func TestLivenessProbeDisabledByStarted(t *testing.T) { func TestLivenessProbeDisabledByStarted(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1}) w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
// livenessProbe fails, but is disabled // livenessProbe fails, but is disabled
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := "Not started, probe failure, result success" msg := "Not started, probe failure, result success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
// setting started state // setting started state
m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true) m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true)
// livenessProbe fails // livenessProbe fails
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "Started, probe failure, result failure" msg = "Started, probe failure, result failure"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Failure, msg) expectResult(t, w, results.Failure, msg)
} }
func TestStartupProbeDisabledByStarted(t *testing.T) { func TestStartupProbeDisabledByStarted(t *testing.T) {
ctx := context.Background()
m := newTestManager() m := newTestManager()
w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 2}) w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 2})
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
// startupProbe fails < FailureThreshold, stays unknown // startupProbe fails < FailureThreshold, stays unknown
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg := "Not started, probe failure, result unknown" msg := "Not started, probe failure, result unknown"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Unknown, msg) expectResult(t, w, results.Unknown, msg)
// startupProbe succeeds // startupProbe succeeds
m.prober.exec = fakeExecProber{probe.Success, nil} m.prober.exec = fakeExecProber{probe.Success, nil}
msg = "Started, probe success, result success" msg = "Started, probe success, result success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
// setting started state // setting started state
m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true) m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true)
// startupProbe fails, but is disabled // startupProbe fails, but is disabled
m.prober.exec = fakeExecProber{probe.Failure, nil} m.prober.exec = fakeExecProber{probe.Failure, nil}
msg = "Started, probe failure, result success" msg = "Started, probe failure, result success"
expectContinue(t, w, w.doProbe(), msg) expectContinue(t, w, w.doProbe(ctx), msg)
expectResult(t, w, results.Success, msg) expectResult(t, w, results.Success, msg)
} }
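The threshold tests above all exercise the same bookkeeping: resultRun counts consecutive identical outcomes, and a new result only takes effect once it repeats SuccessThreshold or FailureThreshold times. A distilled, hypothetical version of that logic (not the worker's actual code):

package main

import "fmt"

type thresholds struct {
	successThreshold int
	failureThreshold int
	last             bool
	resultRun        int
	reported         bool // last result that met its threshold
}

// observe feeds one probe outcome in and returns the reported result.
func (t *thresholds) observe(success bool) bool {
	if success == t.last {
		t.resultRun++
	} else {
		t.last = success
		t.resultRun = 1
	}
	threshold := t.failureThreshold
	if success {
		threshold = t.successThreshold
	}
	if t.resultRun >= threshold {
		t.reported = success
		t.resultRun = 0
	}
	return t.reported
}

func main() {
	t := &thresholds{successThreshold: 1, failureThreshold: 3, reported: true}
	for _, s := range []bool{false, false, false} {
		fmt.Println("reported success:", t.observe(s)) // true, true, false
	}
}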


@ -44,6 +44,7 @@ type RunPodResult struct {
// RunOnce polls from one configuration update and runs the associated pods. // RunOnce polls from one configuration update and runs the associated pods.
func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) { func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) {
ctx := context.Background()
// Setup filesystem directories. // Setup filesystem directories.
if err := kl.setupDataDirs(); err != nil { if err := kl.setupDataDirs(); err != nil {
return nil, err return nil, err
@ -59,7 +60,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
select { select {
case u := <-updates: case u := <-updates:
klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods)) klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods))
result, err := kl.runOnce(u.Pods, runOnceRetryDelay) result, err := kl.runOnce(ctx, u.Pods, runOnceRetryDelay)
klog.InfoS("Finished processing pods", "numPods", len(u.Pods)) klog.InfoS("Finished processing pods", "numPods", len(u.Pods))
return result, err return result, err
case <-time.After(runOnceManifestDelay): case <-time.After(runOnceManifestDelay):
@ -68,7 +69,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
} }
// runOnce runs a given set of pods and returns their status. // runOnce runs a given set of pods and returns their status.
func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
ch := make(chan RunPodResult) ch := make(chan RunPodResult)
admitted := []*v1.Pod{} admitted := []*v1.Pod{}
for _, pod := range pods { for _, pod := range pods {
@ -81,7 +82,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
admitted = append(admitted, pod) admitted = append(admitted, pod)
go func(pod *v1.Pod) { go func(pod *v1.Pod) {
err := kl.runPod(pod, retryDelay) err := kl.runPod(ctx, pod, retryDelay)
ch <- RunPodResult{pod, err} ch <- RunPodResult{pod, err}
}(pod) }(pod)
} }
@ -92,7 +93,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
res := <-ch res := <-ch
results = append(results, res) results = append(results, res)
if res.Err != nil { if res.Err != nil {
failedContainerName, err := kl.getFailedContainers(res.Pod) failedContainerName, err := kl.getFailedContainers(ctx, res.Pod)
if err != nil { if err != nil {
klog.InfoS("Unable to get failed containers' names for pod", "pod", klog.KObj(res.Pod), "err", err) klog.InfoS("Unable to get failed containers' names for pod", "pod", klog.KObj(res.Pod), "err", err)
} else { } else {
@ -111,12 +112,12 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
} }
// runPod runs a single pod and waits until all containers are running. // runPod runs a single pod and waits until all containers are running.
func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Duration) error {
var isTerminal bool var isTerminal bool
delay := retryDelay delay := retryDelay
retry := 0 retry := 0
for !isTerminal { for !isTerminal {
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil { if err != nil {
return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
} }
@ -132,7 +133,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod)) klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod))
} }
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
if isTerminal, err = kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil { if isTerminal, err = kl.syncPod(ctx, kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil {
return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err) return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err)
} }
if retry >= runOnceMaxRetries { if retry >= runOnceMaxRetries {
@ -160,8 +161,8 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo
} }
// getFailedContainers returns the names of the failed containers for the pod. // getFailedContainers returns the names of the failed containers for the pod.
func (kl *Kubelet) getFailedContainers(pod *v1.Pod) ([]string, error) { func (kl *Kubelet) getFailedContainers(ctx context.Context, pod *v1.Pod) ([]string, error) {
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) return nil, fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
} }
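The run-once path now threads one context from RunOnce down through runOnce, runPod, and the runtime call, instead of minting context.Background() at each level. A trimmed, hypothetical sketch of that call chain:

package main

import (
	"context"
	"fmt"
)

type runtimeClient struct{}

// GetPodStatus models a CRI call that now takes the caller's context.
func (runtimeClient) GetPodStatus(ctx context.Context, name string) (string, error) {
	return "Running", ctx.Err()
}

func runPod(ctx context.Context, rt runtimeClient, name string) error {
	status, err := rt.GetPodStatus(ctx, name)
	if err != nil {
		return fmt.Errorf("unable to get status for pod %q: %w", name, err)
	}
	fmt.Println(name, status)
	return nil
}

func runOnce(ctx context.Context, rt runtimeClient, pods []string) error {
	for _, pod := range pods {
		if err := runPod(ctx, rt, pod); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// The root is still context.Background(); the point is that it is
	// created once, at the top, and flows down as a parameter.
	_ = runOnce(context.Background(), runtimeClient{}, []string{"pod-a", "pod-b"})
}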


@ -17,6 +17,7 @@ limitations under the License.
package kubelet package kubelet
import ( import (
"context"
"os" "os"
"testing" "testing"
"time" "time"
@ -52,6 +53,7 @@ import (
) )
func TestRunOnce(t *testing.T) { func TestRunOnce(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t) mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() defer mockCtrl.Finish()
@ -168,7 +170,7 @@ func TestRunOnce(t *testing.T) {
}, },
}, },
} }
results, err := kb.runOnce(pods, time.Millisecond) results, err := kb.runOnce(ctx, pods, time.Millisecond)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }


@ -239,17 +239,17 @@ type HostInterface interface {
stats.Provider stats.Provider
GetVersionInfo() (*cadvisorapi.VersionInfo, error) GetVersionInfo() (*cadvisorapi.VersionInfo, error)
GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error)
GetRunningPods() ([]*v1.Pod, error) GetRunningPods(ctx context.Context) ([]*v1.Pod, error)
RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) RunInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string) ([]byte, error)
CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error CheckpointContainer(ctx context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error
GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error
ServeLogs(w http.ResponseWriter, req *http.Request) ServeLogs(w http.ResponseWriter, req *http.Request)
ResyncInterval() time.Duration ResyncInterval() time.Duration
GetHostname() string GetHostname() string
LatestLoopEntryTime() time.Time LatestLoopEntryTime() time.Time
GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error)
GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error)
GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error)
} }
// NewServer initializes and configures a kubelet.Server object to handle HTTP requests. // NewServer initializes and configures a kubelet.Server object to handle HTTP requests.
@ -740,7 +740,8 @@ func (s *Server) getPods(request *restful.Request, response *restful.Response) {
// provided by the container runtime, and is different from the list returned // provided by the container runtime, and is different from the list returned
// by getPods, which is a set of desired pods to run. // by getPods, which is a set of desired pods to run.
func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) { func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
pods, err := s.host.GetRunningPods() pods, err := s.host.GetRunningPods(ctx)
if err != nil { if err != nil {
response.WriteError(http.StatusInternalServerError, err) response.WriteError(http.StatusInternalServerError, err)
return return
@ -820,7 +821,7 @@ func (s *Server) getAttach(request *restful.Request, response *restful.Response)
} }
podFullName := kubecontainer.GetPodFullName(pod) podFullName := kubecontainer.GetPodFullName(pod)
url, err := s.host.GetAttach(podFullName, params.podUID, params.containerName, *streamOpts) url, err := s.host.GetAttach(request.Request.Context(), podFullName, params.podUID, params.containerName, *streamOpts)
if err != nil { if err != nil {
streaming.WriteError(err, response.ResponseWriter) streaming.WriteError(err, response.ResponseWriter)
return return
@ -845,7 +846,7 @@ func (s *Server) getExec(request *restful.Request, response *restful.Response) {
} }
podFullName := kubecontainer.GetPodFullName(pod) podFullName := kubecontainer.GetPodFullName(pod)
url, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, *streamOpts) url, err := s.host.GetExec(request.Request.Context(), podFullName, params.podUID, params.containerName, params.cmd, *streamOpts)
if err != nil { if err != nil {
streaming.WriteError(err, response.ResponseWriter) streaming.WriteError(err, response.ResponseWriter)
return return
@ -864,7 +865,7 @@ func (s *Server) getRun(request *restful.Request, response *restful.Response) {
// For legacy reasons, run uses different query param than exec. // For legacy reasons, run uses different query param than exec.
params.cmd = strings.Split(request.QueryParameter("cmd"), " ") params.cmd = strings.Split(request.QueryParameter("cmd"), " ")
data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd) data, err := s.host.RunInContainer(request.Request.Context(), kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd)
if err != nil { if err != nil {
response.WriteError(http.StatusInternalServerError, err) response.WriteError(http.StatusInternalServerError, err)
return return
@ -907,7 +908,7 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp
return return
} }
url, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID, *portForwardOptions) url, err := s.host.GetPortForward(request.Request.Context(), pod.Name, pod.Namespace, pod.UID, *portForwardOptions)
if err != nil { if err != nil {
streaming.WriteError(err, response.ResponseWriter) streaming.WriteError(err, response.ResponseWriter)
return return
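On the server side the context is no longer fabricated: each handler derives it from the incoming HTTP request, so a client disconnect can propagate cancellation into the runtime calls. A sketch of that pattern using plain net/http for brevity (the kubelet's go-restful handlers reach the same context via request.Request.Context()):

package main

import (
	"context"
	"fmt"
	"net/http"
)

// getRunningPods models a host call that now accepts a context.
func getRunningPods(ctx context.Context) ([]string, error) {
	return []string{"pod-a"}, ctx.Err()
}

func runningPodsHandler(w http.ResponseWriter, req *http.Request) {
	ctx := req.Context() // request-scoped; cancelled if the client goes away
	pods, err := getRunningPods(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, pods)
}

func main() {
	http.HandleFunc("/runningpods/", runningPodsHandler)
	// http.ListenAndServe(":10250", nil) would serve this; omitted here.
}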
@ -919,6 +920,7 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp
// podNamespace, pod and container actually exist and only then calls out // podNamespace, pod and container actually exist and only then calls out
// to the runtime to actually checkpoint the container. // to the runtime to actually checkpoint the container.
func (s *Server) checkpoint(request *restful.Request, response *restful.Response) { func (s *Server) checkpoint(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
pod, ok := s.host.GetPodByName(request.PathParameter("podNamespace"), request.PathParameter("podID")) pod, ok := s.host.GetPodByName(request.PathParameter("podNamespace"), request.PathParameter("podID"))
if !ok { if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
@ -973,7 +975,7 @@ func (s *Server) checkpoint(request *restful.Request, response *restful.Response
options.Timeout = timeout options.Timeout = timeout
} }
if err := s.host.CheckpointContainer(pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil { if err := s.host.CheckpointContainer(ctx, pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil {
response.WriteError( response.WriteError(
http.StatusInternalServerError, http.StatusInternalServerError,
fmt.Errorf( fmt.Errorf(


@ -74,11 +74,11 @@ const (
type fakeKubelet struct { type fakeKubelet struct {
podByNameFunc func(namespace, name string) (*v1.Pod, bool) podByNameFunc func(namespace, name string) (*v1.Pod, bool)
containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) containerInfoFunc func(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error)
machineInfoFunc func() (*cadvisorapi.MachineInfo, error) machineInfoFunc func() (*cadvisorapi.MachineInfo, error)
podsFunc func() []*v1.Pod podsFunc func() []*v1.Pod
runningPodsFunc func() ([]*v1.Pod, error) runningPodsFunc func(ctx context.Context) ([]*v1.Pod, error)
logFunc func(w http.ResponseWriter, req *http.Request) logFunc func(w http.ResponseWriter, req *http.Request)
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options) getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options)
@ -109,8 +109,8 @@ func (fk *fakeKubelet) GetRequestedContainersInfo(containerName string, options
return map[string]*cadvisorapi.ContainerInfo{}, nil return map[string]*cadvisorapi.ContainerInfo{}, nil
} }
func (fk *fakeKubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { func (fk *fakeKubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
return fk.containerInfoFunc(podFullName, uid, containerName, req) return fk.containerInfoFunc(ctx, podFullName, uid, containerName, req)
} }
func (fk *fakeKubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) { func (fk *fakeKubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
@ -129,8 +129,8 @@ func (fk *fakeKubelet) GetPods() []*v1.Pod {
return fk.podsFunc() return fk.podsFunc()
} }
func (fk *fakeKubelet) GetRunningPods() ([]*v1.Pod, error) { func (fk *fakeKubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
return fk.runningPodsFunc() return fk.runningPodsFunc(ctx)
} }
func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
@ -145,11 +145,11 @@ func (fk *fakeKubelet) GetHostname() string {
return fk.hostnameFunc() return fk.hostnameFunc()
} }
func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) { func (fk *fakeKubelet) RunInContainer(_ context.Context, podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
return fk.runFunc(podFullName, uid, containerName, cmd) return fk.runFunc(podFullName, uid, containerName, cmd)
} }
func (fk *fakeKubelet) CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error { func (fk *fakeKubelet) CheckpointContainer(_ context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error {
if containerName == "checkpointingFailure" { if containerName == "checkpointingFailure" {
return fmt.Errorf("Returning error for test") return fmt.Errorf("Returning error for test")
} }
@ -162,15 +162,15 @@ type fakeRuntime struct {
portForwardFunc func(string, int32, io.ReadWriteCloser) error portForwardFunc func(string, int32, io.ReadWriteCloser) error
} }
func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize) return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize)
} }
func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize) return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize)
} }
func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
return f.portForwardFunc(podSandboxID, port, stream) return f.portForwardFunc(podSandboxID, port, stream)
} }
@ -209,7 +209,7 @@ func newTestStreamingServer(streamIdleTimeout time.Duration) (s *testStreamingSe
return s, nil return s, nil
} }
func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { func (fk *fakeKubelet) GetExec(_ context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
if fk.getExecCheck != nil { if fk.getExecCheck != nil {
fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts) fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts)
} }
@ -228,7 +228,7 @@ func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerNa
return url.Parse(resp.GetUrl()) return url.Parse(resp.GetUrl())
} }
func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { func (fk *fakeKubelet) GetAttach(_ context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
if fk.getAttachCheck != nil { if fk.getAttachCheck != nil {
fk.getAttachCheck(podFullName, podUID, containerName, streamOpts) fk.getAttachCheck(podFullName, podUID, containerName, streamOpts)
} }
@ -246,7 +246,7 @@ func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, container
return url.Parse(resp.GetUrl()) return url.Parse(resp.GetUrl())
} }
func (fk *fakeKubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { func (fk *fakeKubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
if fk.getPortForwardCheck != nil { if fk.getPortForwardCheck != nil {
fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts) fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts)
} }
@ -272,14 +272,16 @@ func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo
func (*fakeKubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) { func (*fakeKubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) {
return map[string]volume.BlockVolume{}, true return map[string]volume.BlockVolume{}, true
} }
func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil } func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil }
func (*fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil } func (*fakeKubelet) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) { return nil, nil }
func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(_ context.Context) ([]statsapi.PodStats, error) {
return nil, nil return nil, nil
} }
func (*fakeKubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { return nil, nil } func (*fakeKubelet) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) {
return nil, nil
}
func (*fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil } func (*fakeKubelet) ImageFsStats(_ context.Context) (*statsapi.FsStats, error) { return nil, nil }
func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil } func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil }
func (*fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { func (*fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
return nil, nil, nil return nil, nil, nil
} }
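The fakes accept the new parameter but deliberately ignore it with the blank identifier, which keeps them satisfying the widened interface without pretending to honor cancellation. A minimal sketch of that pattern against a hypothetical interface:

package main

import (
	"context"
	"fmt"
)

type StatsSource interface {
	RootFsStats(ctx context.Context) (string, error)
}

type fakeStats struct{}

// The underscore documents that this fake never consults the context.
func (fakeStats) RootFsStats(_ context.Context) (string, error) {
	return "rootfs: ok", nil
}

func main() {
	var s StatsSource = fakeStats{}
	out, _ := s.RootFsStats(context.Background())
	fmt.Println(out)
}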


@ -18,6 +18,7 @@ limitations under the License.
package stats package stats
import ( import (
"context"
"fmt" "fmt"
"net/http" "net/http"
@ -26,7 +27,7 @@ import (
cadvisorv2 "github.com/google/cadvisor/info/v2" cadvisorv2 "github.com/google/cadvisor/info/v2"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
@ -39,18 +40,18 @@ type Provider interface {
// The following stats are provided by either CRI or cAdvisor. // The following stats are provided by either CRI or cAdvisor.
// //
// ListPodStats returns the stats of all the containers managed by pods. // ListPodStats returns the stats of all the containers managed by pods.
ListPodStats() ([]statsapi.PodStats, error) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error)
// ListPodCPUAndMemoryStats returns the CPU and memory stats of all the // ListPodCPUAndMemoryStats returns the CPU and memory stats of all the
// pod-managed containers. // pod-managed containers.
ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error)
// ListPodStatsAndUpdateCPUNanoCoreUsage returns the stats of all the // ListPodStatsAndUpdateCPUNanoCoreUsage returns the stats of all the
// containers managed by pods and forces an update of the cpu usageNanoCores. // containers managed by pods and forces an update of the cpu usageNanoCores.
// This is a workaround for CRI runtimes that do not integrate with // This is a workaround for CRI runtimes that do not integrate with
// cadvisor. See https://github.com/kubernetes/kubernetes/issues/72788 // cadvisor. See https://github.com/kubernetes/kubernetes/issues/72788
// for more details. // for more details.
ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error)
// ImageFsStats returns the stats of the image filesystem. // ImageFsStats returns the stats of the image filesystem.
ImageFsStats() (*statsapi.FsStats, error) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error)
// The following stats are provided by cAdvisor. // The following stats are provided by cAdvisor.
// //
@ -67,7 +68,7 @@ type Provider interface {
// //
// GetContainerInfo returns the information of the container with the // GetContainerInfo returns the information of the container with the
// containerName managed by the pod with the uid. // containerName managed by the pod with the uid.
GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
// GetRawContainerInfo returns the information of the container with the // GetRawContainerInfo returns the information of the container with the
// containerName. If subcontainers is true, this function will return the // containerName. If subcontainers is true, this function will return the
// information of all the sub-containers as well. // information of all the sub-containers as well.
@ -140,6 +141,7 @@ func CreateHandlers(rootPath string, provider Provider, summaryProvider SummaryP
// Handles stats summary requests to /stats/summary // Handles stats summary requests to /stats/summary
// If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response. // If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response.
func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { func (h *handler) handleSummary(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
onlyCPUAndMemory := false onlyCPUAndMemory := false
err := request.Request.ParseForm() err := request.Request.ParseForm()
if err != nil { if err != nil {
@ -152,11 +154,11 @@ func (h *handler) handleSummary(request *restful.Request, response *restful.Resp
} }
var summary *statsapi.Summary var summary *statsapi.Summary
if onlyCPUAndMemory { if onlyCPUAndMemory {
summary, err = h.summaryProvider.GetCPUAndMemoryStats() summary, err = h.summaryProvider.GetCPUAndMemoryStats(ctx)
} else { } else {
// external calls to the summary API use cached stats // external calls to the summary API use cached stats
forceStatsUpdate := false forceStatsUpdate := false
summary, err = h.summaryProvider.Get(forceStatsUpdate) summary, err = h.summaryProvider.Get(ctx, forceStatsUpdate)
} }
if err != nil { if err != nil {
handleError(response, "/stats/summary", err) handleError(response, "/stats/summary", err)
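handleSummary now feeds one request-scoped context into whichever provider call the query parameter selects. A distilled sketch of that branch, with summaryProvider standing in for the kubelet's SummaryProvider (names are illustrative):

package main

import (
	"context"
	"fmt"
)

type summary struct{ OnlyCPUAndMemory bool }

type summaryProvider struct{}

func (summaryProvider) Get(ctx context.Context, updateStats bool) (*summary, error) {
	return &summary{}, ctx.Err()
}

func (summaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*summary, error) {
	return &summary{OnlyCPUAndMemory: true}, ctx.Err()
}

func handleSummary(ctx context.Context, sp summaryProvider, onlyCPUAndMemory bool) (*summary, error) {
	if onlyCPUAndMemory {
		return sp.GetCPUAndMemoryStats(ctx)
	}
	// External calls use cached stats, so no forced update.
	return sp.Get(ctx, false)
}

func main() {
	s, err := handleSummary(context.Background(), summaryProvider{}, true)
	fmt.Println(s, err)
}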


@ -18,6 +18,7 @@ limitations under the License.
package stats package stats
import ( import (
"context"
"fmt" "fmt"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@ -31,9 +32,9 @@ import (
type SummaryProvider interface { type SummaryProvider interface {
// Get provides a new Summary with the stats from Kubelet, // Get provides a new Summary with the stats from Kubelet,
// and will update some stats if updateStats is true. // and will update some stats if updateStats is true.
Get(updateStats bool) (*statsapi.Summary, error) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error)
// GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet. // GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet.
GetCPUAndMemoryStats() (*statsapi.Summary, error) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error)
} }
// summaryProviderImpl implements the SummaryProvider interface. // summaryProviderImpl implements the SummaryProvider interface.
@ -65,7 +66,7 @@ func NewSummaryProvider(statsProvider Provider) SummaryProvider {
} }
} }
func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) { func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
// TODO(timstclair): Consider returning a best-effort response if any of // TODO(timstclair): Consider returning a best-effort response if any of
// the following errors occur. // the following errors occur.
node, err := sp.provider.GetNode() node, err := sp.provider.GetNode()
@ -81,15 +82,15 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get rootFs stats: %v", err) return nil, fmt.Errorf("failed to get rootFs stats: %v", err)
} }
imageFsStats, err := sp.provider.ImageFsStats() imageFsStats, err := sp.provider.ImageFsStats(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get imageFs stats: %v", err) return nil, fmt.Errorf("failed to get imageFs stats: %v", err)
} }
var podStats []statsapi.PodStats var podStats []statsapi.PodStats
if updateStats { if updateStats {
podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage() podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
} else { } else {
podStats, err = sp.provider.ListPodStats() podStats, err = sp.provider.ListPodStats(ctx)
} }
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list pod stats: %v", err) return nil, fmt.Errorf("failed to list pod stats: %v", err)
@ -118,7 +119,7 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
return &summary, nil return &summary, nil
} }
func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error) { func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
// TODO(timstclair): Consider returning a best-effort response if any of // TODO(timstclair): Consider returning a best-effort response if any of
// the following errors occur. // the following errors occur.
node, err := sp.provider.GetNode() node, err := sp.provider.GetNode()
@ -131,7 +132,7 @@ func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error)
return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
} }
podStats, err := sp.provider.ListPodCPUAndMemoryStats() podStats, err := sp.provider.ListPodCPUAndMemoryStats(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list pod stats: %v", err) return nil, fmt.Errorf("failed to list pod stats: %v", err)
} }


@ -20,6 +20,7 @@ limitations under the License.
package stats package stats
import ( import (
"context"
"testing" "testing"
"time" "time"
@ -48,6 +49,7 @@ var (
) )
func TestSummaryProviderGetStats(t *testing.T) { func TestSummaryProviderGetStats(t *testing.T) {
ctx := context.Background()
assert := assert.New(t) assert := assert.New(t)
podStats := []statsapi.PodStats{ podStats := []statsapi.PodStats{
@ -77,9 +79,9 @@ func TestSummaryProviderGetStats(t *testing.T) {
mockStatsProvider.EXPECT().GetNode().Return(node, nil) mockStatsProvider.EXPECT().GetNode().Return(node, nil)
mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig) mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig)
mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot) mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot)
mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil) mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil)
mockStatsProvider.EXPECT().ImageFsStats().Return(imageFsStats, nil) mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(imageFsStats, nil)
mockStatsProvider.EXPECT().RootFsStats().Return(rootFsStats, nil) mockStatsProvider.EXPECT().RootFsStats().Return(rootFsStats, nil)
mockStatsProvider.EXPECT().RlimitStats().Return(rlimitStats, nil) mockStatsProvider.EXPECT().RlimitStats().Return(rlimitStats, nil)
mockStatsProvider.EXPECT().GetCgroupStats("/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil) mockStatsProvider.EXPECT().GetCgroupStats("/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil)
@ -91,7 +93,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
kubeletCreationTime := metav1.Now() kubeletCreationTime := metav1.Now()
systemBootTime := metav1.Now() systemBootTime := metav1.Now()
provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider} provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider}
summary, err := provider.Get(true) summary, err := provider.Get(ctx, true)
assert.NoError(err) assert.NoError(err)
assert.Equal(summary.Node.NodeName, "test-node") assert.Equal(summary.Node.NodeName, "test-node")
@ -139,6 +141,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
} }
func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) { func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
ctx := context.Background()
assert := assert.New(t) assert := assert.New(t)
podStats := []statsapi.PodStats{ podStats := []statsapi.PodStats{
@ -165,7 +168,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
mockStatsProvider.EXPECT().GetNode().Return(node, nil) mockStatsProvider.EXPECT().GetNode().Return(node, nil)
mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig) mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig)
mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot) mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot)
mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats().Return(podStats, nil) mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats(ctx).Return(podStats, nil)
mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/", false).Return(cgroupStatsMap["/"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/", false).Return(cgroupStatsMap["/"].cs, nil)
mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/runtime", false).Return(cgroupStatsMap["/runtime"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/runtime", false).Return(cgroupStatsMap["/runtime"].cs, nil)
mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/misc", false).Return(cgroupStatsMap["/misc"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/misc", false).Return(cgroupStatsMap["/misc"].cs, nil)
@ -173,7 +176,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/kubepods", false).Return(cgroupStatsMap["/pods"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/kubepods", false).Return(cgroupStatsMap["/pods"].cs, nil)
provider := NewSummaryProvider(mockStatsProvider) provider := NewSummaryProvider(mockStatsProvider)
summary, err := provider.GetCPUAndMemoryStats() summary, err := provider.GetCPUAndMemoryStats(ctx)
assert.NoError(err) assert.NoError(err)
assert.Equal(summary.Node.NodeName, "test-node") assert.Equal(summary.Node.NodeName, "test-node")
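The regenerated mocks make the context part of the expectation (ListPodStats(ctx) rather than ListPodStats()), so a test can assert the provider saw exactly the context the caller supplied. A hand-rolled, gomock-free stand-in showing what that assertion buys:

package main

import (
	"context"
	"fmt"
)

type recordingProvider struct {
	gotCtx context.Context
}

// ListPodStats records the context it was handed for later inspection.
func (r *recordingProvider) ListPodStats(ctx context.Context) ([]string, error) {
	r.gotCtx = ctx
	return []string{"pod-a"}, nil
}

func main() {
	ctx := context.Background()
	p := &recordingProvider{}
	_, _ = p.ListPodStats(ctx)
	fmt.Println("provider saw caller's context:", p.gotCtx == ctx)
}

When the exact context is uninteresting, gomock.Any() is the usual looser matcher in place of the concrete ctx.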


@@ -21,6 +21,7 @@ limitations under the License.
 package testing
 
 import (
+	context "context"
 	reflect "reflect"
 
 	gomock "github.com/golang/mock/gomock"
@@ -88,18 +89,18 @@ func (mr *MockProviderMockRecorder) GetCgroupStats(cgroupName, updateStats inter
 }
 
 // GetContainerInfo mocks base method.
-func (m *MockProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
+func (m *MockProvider) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetContainerInfo", podFullName, uid, containerName, req)
+	ret := m.ctrl.Call(m, "GetContainerInfo", ctx, podFullName, uid, containerName, req)
 	ret0, _ := ret[0].(*v1.ContainerInfo)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // GetContainerInfo indicates an expected call of GetContainerInfo.
-func (mr *MockProviderMockRecorder) GetContainerInfo(podFullName, uid, containerName, req interface{}) *gomock.Call {
+func (mr *MockProviderMockRecorder) GetContainerInfo(ctx, podFullName, uid, containerName, req interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), podFullName, uid, containerName, req)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), ctx, podFullName, uid, containerName, req)
 }
 
 // GetNode mocks base method.
@@ -220,18 +221,18 @@ func (mr *MockProviderMockRecorder) GetRequestedContainersInfo(containerName, op
 }
 
 // ImageFsStats mocks base method.
-func (m *MockProvider) ImageFsStats() (*v1alpha1.FsStats, error) {
+func (m *MockProvider) ImageFsStats(ctx context.Context) (*v1alpha1.FsStats, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ImageFsStats")
+	ret := m.ctrl.Call(m, "ImageFsStats", ctx)
 	ret0, _ := ret[0].(*v1alpha1.FsStats)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // ImageFsStats indicates an expected call of ImageFsStats.
-func (mr *MockProviderMockRecorder) ImageFsStats() *gomock.Call {
+func (mr *MockProviderMockRecorder) ImageFsStats(ctx interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats), ctx)
 }
 
 // ListBlockVolumesForPod mocks base method.
@@ -250,48 +251,48 @@ func (mr *MockProviderMockRecorder) ListBlockVolumesForPod(podUID interface{}) *
 }
 
 // ListPodCPUAndMemoryStats mocks base method.
-func (m *MockProvider) ListPodCPUAndMemoryStats() ([]v1alpha1.PodStats, error) {
+func (m *MockProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]v1alpha1.PodStats, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats")
+	ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats", ctx)
 	ret0, _ := ret[0].([]v1alpha1.PodStats)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // ListPodCPUAndMemoryStats indicates an expected call of ListPodCPUAndMemoryStats.
-func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats() *gomock.Call {
+func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats(ctx interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats), ctx)
}
 
 // ListPodStats mocks base method.
-func (m *MockProvider) ListPodStats() ([]v1alpha1.PodStats, error) {
+func (m *MockProvider) ListPodStats(ctx context.Context) ([]v1alpha1.PodStats, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ListPodStats")
+	ret := m.ctrl.Call(m, "ListPodStats", ctx)
 	ret0, _ := ret[0].([]v1alpha1.PodStats)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // ListPodStats indicates an expected call of ListPodStats.
-func (mr *MockProviderMockRecorder) ListPodStats() *gomock.Call {
+func (mr *MockProviderMockRecorder) ListPodStats(ctx interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats), ctx)
 }
 
 // ListPodStatsAndUpdateCPUNanoCoreUsage mocks base method.
-func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]v1alpha1.PodStats, error) {
+func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]v1alpha1.PodStats, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage")
+	ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage", ctx)
 	ret0, _ := ret[0].([]v1alpha1.PodStats)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // ListPodStatsAndUpdateCPUNanoCoreUsage indicates an expected call of ListPodStatsAndUpdateCPUNanoCoreUsage.
-func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage() *gomock.Call {
+func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage), ctx)
 }
 
 // ListVolumesForPod mocks base method.
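These mocks are generated by gomock, so adding context.Context to the Provider interface changes both the mocked method and its recorder, and every caller-side expectation must gain a matcher for the new first argument. A minimal sketch of the updated usage, assuming it lives alongside the generated MockProvider (the test name and return values are hypothetical):

	func TestListPodStatsTakesContext(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()

		m := NewMockProvider(ctrl)
		// gomock.Any() matches whatever context the caller threads through.
		m.EXPECT().ListPodStats(gomock.Any()).Return([]v1alpha1.PodStats{}, nil)

		if _, err := m.ListPodStats(context.Background()); err != nil {
			t.Fatal(err)
		}
	}

Matching the context with gomock.Any() keeps expectations stable whether callers pass context.Background() or a derived context.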


@@ -21,6 +21,7 @@ limitations under the License.
 package testing
 
 import (
+	context "context"
 	reflect "reflect"
 
 	gomock "github.com/golang/mock/gomock"
@@ -51,31 +52,31 @@ func (m *MockSummaryProvider) EXPECT() *MockSummaryProviderMockRecorder {
 }
 
 // Get mocks base method.
-func (m *MockSummaryProvider) Get(updateStats bool) (*v1alpha1.Summary, error) {
+func (m *MockSummaryProvider) Get(ctx context.Context, updateStats bool) (*v1alpha1.Summary, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Get", updateStats)
+	ret := m.ctrl.Call(m, "Get", ctx, updateStats)
 	ret0, _ := ret[0].(*v1alpha1.Summary)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // Get indicates an expected call of Get.
-func (mr *MockSummaryProviderMockRecorder) Get(updateStats interface{}) *gomock.Call {
+func (mr *MockSummaryProviderMockRecorder) Get(ctx, updateStats interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), updateStats)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), ctx, updateStats)
 }
 
 // GetCPUAndMemoryStats mocks base method.
-func (m *MockSummaryProvider) GetCPUAndMemoryStats() (*v1alpha1.Summary, error) {
+func (m *MockSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*v1alpha1.Summary, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetCPUAndMemoryStats")
+	ret := m.ctrl.Call(m, "GetCPUAndMemoryStats", ctx)
 	ret0, _ := ret[0].(*v1alpha1.Summary)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // GetCPUAndMemoryStats indicates an expected call of GetCPUAndMemoryStats.
-func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats() *gomock.Call {
+func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats(ctx interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats), ctx)
 }
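For methods that already take parameters, such as Get(ctx, updateStats), the regenerated recorder simply prepends the context matcher. A hedged sketch of setting an expectation against this mock (the controller setup and values are illustrative):

	mockSummary := NewMockSummaryProvider(ctrl)
	// The first matcher covers ctx; the second still matches updateStats.
	mockSummary.EXPECT().Get(gomock.Any(), true).Return(&v1alpha1.Summary{}, nil)
	summary, err := mockSummary.Get(context.Background(), true)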

Some files were not shown because too many files have changed in this diff.