Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 18:02:01 +00:00)
Second attempt: Plumb context to Kubelet CRI calls (#113591)

* plumb context from CRI calls through kubelet
* clean up extra timeouts
* try fixing incorrectly cancelled context
This commit is contained in:
parent 27766455f1
commit 64af1adace
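The change is mechanical but broad: the kubelet-internal runtime interfaces gain a leading context.Context parameter, and call sites thread an existing context through (or start from context.Background() where no caller context exists yet). A minimal before/after sketch of the calling convention, using hypothetical names rather than the real kubelet/CRI types touched by this commit:

```go
// Illustrative sketch only; runtimeClient and listSandboxes are hypothetical
// stand-ins for the interfaces changed in the diff below.
package sketch

import (
	"context"
	"time"
)

type runtimeClient interface {
	// Before: ListPodSandbox(filter string) ([]string, error)
	// After: the caller's context rides along with every CRI call.
	ListPodSandbox(ctx context.Context, filter string) ([]string, error)
}

func listSandboxes(ctx context.Context, rc runtimeClient) ([]string, error) {
	// Callers can now bound or cancel the remote CRI round-trip.
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	return rc.ListPodSandbox(ctx, "")
}
```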
@@ -18,6 +18,7 @@ package tests

import (
"bytes"
"context"
"fmt"
"io"
"net"

@@ -51,7 +52,7 @@ type fakePortForwarder struct {

var _ portforward.PortForwarder = &fakePortForwarder{}

func (pf *fakePortForwarder) PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
func (pf *fakePortForwarder) PortForward(_ context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
defer stream.Close()

// read from the client

@@ -59,11 +59,11 @@ type fakeExecutor struct {
exec bool
}

func (ex *fakeExecutor) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error {
func (ex *fakeExecutor) ExecInContainer(_ context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error {
return ex.run(name, uid, container, cmd, in, out, err, tty)
}

func (ex *fakeExecutor) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error {
func (ex *fakeExecutor) AttachContainer(_ context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error {
return ex.run(name, uid, container, nil, in, out, err, tty)
}
@@ -21,6 +21,7 @@ package cm

import (
"bytes"
"context"
"fmt"
"os"
"path"

@@ -551,9 +552,10 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
podStatusProvider status.PodStatusProvider,
runtimeService internalapi.RuntimeService,
localStorageCapacityIsolation bool) error {
ctx := context.Background()

// Initialize CPU manager
containerMap := buildContainerMapFromRuntime(runtimeService)
containerMap := buildContainerMapFromRuntime(ctx, runtimeService)
err := cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
if err != nil {
return fmt.Errorf("start cpu manager error: %v", err)

@@ -561,7 +563,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,

// Initialize memory manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryManager) {
containerMap := buildContainerMapFromRuntime(runtimeService)
containerMap := buildContainerMapFromRuntime(ctx, runtimeService)
err := cm.memoryManager.Start(memorymanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
if err != nil {
return fmt.Errorf("start memory manager error: %v", err)

@@ -719,15 +721,15 @@ func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
}
}

func buildContainerMapFromRuntime(runtimeService internalapi.RuntimeService) containermap.ContainerMap {
func buildContainerMapFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) containermap.ContainerMap {
podSandboxMap := make(map[string]string)
podSandboxList, _ := runtimeService.ListPodSandbox(nil)
podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil)
for _, p := range podSandboxList {
podSandboxMap[p.Id] = p.Metadata.Uid
}

containerMap := containermap.NewContainerMap()
containerList, _ := runtimeService.ListContainers(nil)
containerList, _ := runtimeService.ListContainers(ctx, nil)
for _, c := range containerList {
if _, exists := podSandboxMap[c.PodSandboxId]; !exists {
klog.InfoS("no PodSandBox found for the container", "podSandboxId", c.PodSandboxId, "containerName", c.Metadata.Name, "containerId", c.Id)
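Start has no caller-supplied context, so the hunk above roots the two CRI list calls in context.Background(). A hedged sketch of the same join logic with an explicit startup bound; the sandboxLister interface and the 30-second deadline are assumptions for illustration, not taken from internalapi.RuntimeService:

```go
// Hypothetical sketch: bounding the list calls made while building the container map.
package sketch

import (
	"context"
	"time"
)

type sandboxLister interface {
	ListPodSandbox(ctx context.Context) (map[string]string, error) // sandbox ID -> pod UID
	ListContainers(ctx context.Context) (map[string]string, error) // container ID -> sandbox ID
}

// buildContainerMap joins containers to their owning pod UIDs, roughly as
// buildContainerMapFromRuntime does above, but with a startup deadline.
func buildContainerMap(rt sandboxLister) (map[string]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	sandboxes, err := rt.ListPodSandbox(ctx)
	if err != nil {
		return nil, err
	}
	containers, err := rt.ListContainers(ctx)
	if err != nil {
		return nil, err
	}

	out := make(map[string]string, len(containers))
	for id, sandboxID := range containers {
		if uid, ok := sandboxes[sandboxID]; ok {
			out[id] = uid
		}
	}
	return out, nil
}
```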
@@ -17,6 +17,7 @@ limitations under the License.
package cpumanager

import (
"context"
"fmt"
"math"
"sync"

@@ -42,7 +43,7 @@ import (
type ActivePodsFunc func() []*v1.Pod

type runtimeService interface {
UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error
UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
}

type policyName string

@@ -401,6 +402,7 @@ func (m *manager) removeStaleState() {
}

func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) {
ctx := context.Background()
success = []reconciledContainer{}
failure = []reconciledContainer{}

@@ -469,7 +471,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
if !cset.Equals(lcset) {
klog.V(4).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
err = m.updateContainerCPUSet(containerID, cset)
err = m.updateContainerCPUSet(ctx, containerID, cset)
if err != nil {
klog.ErrorS(err, "ReconcileState: failed to update container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})

@@ -508,12 +510,13 @@ func findContainerStatusByName(status *v1.PodStatus, name string) (*v1.Container
return nil, fmt.Errorf("unable to find status for container with name %v in pod status (it may not be running)", name)
}

func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet) error {
func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error {
// TODO: Consider adding a `ResourceConfigForContainer` helper in
// helpers_linux.go similar to what exists for pods.
// It would be better to pass the full container resources here instead of
// this patch-like partial resources.
return m.containerRuntime.UpdateContainerResources(
ctx,
containerID,
&runtimeapi.ContainerResources{
Linux: &runtimeapi.LinuxContainerResources{
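reconcileState runs on the CPU manager's periodic loop, which has no request-scoped context of its own; that is why the hunk above starts from context.Background() and hands the context down to UpdateContainerResources. A sketch of that shape, with a hypothetical updater interface and timer wiring in place of the real manager:

```go
// Illustrative only: a timer-driven reconcile loop passing a context into a
// runtime update call. cpuUpdater stands in for the runtimeService interface.
package sketch

import (
	"context"
	"log"
	"time"
)

type cpuUpdater interface {
	UpdateContainerResources(ctx context.Context, containerID string, cpus []int) error
}

func reconcileLoop(stop <-chan struct{}, rt cpuUpdater, desired map[string][]int) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// No caller context on a timer tick, so start from Background.
			ctx := context.Background()
			for id, cpus := range desired {
				if err := rt.UpdateContainerResources(ctx, id, cpus); err != nil {
					log.Printf("failed to update container %s: %v", id, err)
				}
			}
		}
	}
}
```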
@@ -17,6 +17,7 @@ limitations under the License.
package cpumanager

import (
"context"
"fmt"
"os"
"reflect"

@@ -127,7 +128,7 @@ type mockRuntimeService struct {
err error
}

func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error {
func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
return rt.err
}
@@ -17,6 +17,7 @@ limitations under the License.
package memorymanager

import (
"context"
"fmt"
"sync"

@@ -43,7 +44,7 @@ const memoryManagerStateFileName = "memory_manager_state"
type ActivePodsFunc func() []*v1.Pod

type runtimeService interface {
UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error
UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
}

type sourcesReadyStub struct{}
@@ -17,6 +17,7 @@ limitations under the License.
package memorymanager

import (
"context"
"fmt"
"os"
"reflect"

@@ -121,7 +122,7 @@ type mockRuntimeService struct {
err error
}

func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error {
func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
return rt.err
}
@@ -17,6 +17,7 @@ limitations under the License.
package container

import (
"context"
"fmt"
"time"

@@ -41,9 +42,9 @@ type GCPolicy struct {
// Implementation is thread-compatible.
type GC interface {
// Garbage collect containers.
GarbageCollect() error
GarbageCollect(ctx context.Context) error
// Deletes all unused containers, including containers belonging to pods that are terminated but not deleted
DeleteAllUnusedContainers() error
DeleteAllUnusedContainers(ctx context.Context) error
}

// SourcesReadyProvider knows how to determine if configuration sources are ready

@@ -77,11 +78,11 @@ func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider Sourc
}, nil
}

func (cgc *realContainerGC) GarbageCollect() error {
return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
func (cgc *realContainerGC) GarbageCollect(ctx context.Context) error {
return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
}

func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error {
klog.InfoS("Attempting to delete unused containers")
return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}
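With the new GC signatures, the caller decides how long a collection pass may run. A hedged sketch of how a periodic housekeeping loop might invoke it; the one-minute interval and 30-second bound are assumptions for illustration, not values from this commit:

```go
// Illustrative only: driving the ctx-aware GC interface from a housekeeping tick.
package sketch

import (
	"context"
	"log"
	"time"
)

type gc interface {
	GarbageCollect(ctx context.Context) error
}

func housekeeping(stop <-chan struct{}, collector gc) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// Bound each pass so a stuck runtime cannot block the next one.
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			if err := collector.GarbageCollect(ctx); err != nil {
				log.Printf("container GC failed: %v", err)
			}
			cancel()
		}
	}
}
```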
@@ -17,6 +17,7 @@ limitations under the License.
package container

import (
"context"
"encoding/json"
"fmt"
"hash/fnv"

@@ -39,13 +40,13 @@ import (

// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
Run(containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error)
Run(ctx context.Context, containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error)
}

// RuntimeHelper wraps kubelet to make container runtime
// able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
// of a pod.
@@ -70,7 +70,7 @@ type Runtime interface {
Type() string

// Version returns the version information of the container runtime.
Version() (Version, error)
Version(ctx context.Context) (Version, error)

// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.

@@ -79,11 +79,11 @@ type Runtime interface {
APIVersion() (Version, error)
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
Status() (*RuntimeStatus, error)
Status(ctx context.Context) (*RuntimeStatus, error)
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
GetPods(all bool) ([]*Pod, error)
GetPods(ctx context.Context, all bool) ([]*Pod, error)
// GarbageCollect removes dead containers using the specified container gc policy
// If allSourcesReady is not true, it means that kubelet doesn't have the
// complete list of pods from all available sources (e.g., apiserver, http,

@@ -93,18 +93,18 @@ type Runtime interface {
// that are terminated, but not deleted will be evicted. Otherwise, only deleted pods
// will be GC'd.
// TODO: Revisit this method and make it cleaner.
GarbageCollect(gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
GarbageCollect(ctx context.Context, gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
// SyncPod syncs the running pod into the desired pod.
SyncPod(pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
SyncPod(ctx context.Context, pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// TODO(random-liu): Return PodSyncResult in KillPod.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
KillPod(ctx context.Context, pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*PodStatus, error)
// TODO(vmarmol): Unify pod and containerID args.
// GetContainerLogs returns logs of a specific container. By
// default, it returns a snapshot of the container log. Set 'follow' to true to
@@ -112,53 +112,53 @@ type Runtime interface {
// "100" or "all") to tail the log.
GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error)
// DeleteContainer deletes a container. If the container is still running, an error is returned.
DeleteContainer(containerID ContainerID) error
DeleteContainer(ctx context.Context, containerID ContainerID) error
// ImageService provides methods to image-related methods.
ImageService
// UpdatePodCIDR sends a new podCIDR to the runtime.
// This method just proxies a new runtimeConfig with the updated
// CIDR value down to the runtime shim.
UpdatePodCIDR(podCIDR string) error
UpdatePodCIDR(ctx context.Context, podCIDR string) error
// CheckpointContainer tells the runtime to checkpoint a container
// and store the resulting archive to the checkpoint directory.
CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error
CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error
}

// StreamingRuntime is the interface implemented by runtimes that handle the serving of the
// streaming calls (exec/attach/port-forward) themselves. In this case, Kubelet should redirect to
// the runtime server.
type StreamingRuntime interface {
GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
GetExec(ctx context.Context, id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetAttach(ctx context.Context, id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
}

// ImageService interfaces allows to work with image service.
type ImageService interface {
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary. It returns a reference (digest or ID) to the pulled image.
PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
PullImage(ctx context.Context, image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
// GetImageRef gets the reference (digest or ID) of the image which has already been in
// the local storage. It returns ("", nil) if the image isn't in the local storage.
GetImageRef(image ImageSpec) (string, error)
GetImageRef(ctx context.Context, image ImageSpec) (string, error)
// ListImages gets all images currently on the machine.
ListImages() ([]Image, error)
ListImages(ctx context.Context) ([]Image, error)
// RemoveImage removes the specified image.
RemoveImage(image ImageSpec) error
RemoveImage(ctx context.Context, image ImageSpec) error
// ImageStats returns Image statistics.
ImageStats() (*ImageStats, error)
ImageStats(ctx context.Context) (*ImageStats, error)
}

// Attacher interface allows to attach a container.
type Attacher interface {
AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
AttachContainer(ctx context.Context, id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
}

// CommandRunner interface allows to run command in a container.
type CommandRunner interface {
// RunInContainer synchronously executes the command in the container, and returns the output.
// If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
RunInContainer(ctx context.Context, id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
}

// Pod is a group of containers.
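Implementers of these interfaces now receive the caller's context and are expected to honor cancellation rather than block indefinitely. A minimal sketch of an ImageService-style method that stops early when the context is done; the slowPuller type and the fake digest are hypothetical, not the real kuberuntime implementation:

```go
// Hypothetical implementation sketch: honoring cancellation inside a PullImage-like call.
package sketch

import (
	"context"
	"fmt"
	"time"
)

type slowPuller struct{}

func (p *slowPuller) PullImage(ctx context.Context, image string) (string, error) {
	select {
	case <-time.After(5 * time.Second): // stand-in for the actual pull RPC
		return image + "@sha256:0000000000000000", nil // placeholder reference
	case <-ctx.Done():
		// Surface the caller's cancellation or deadline instead of hanging.
		return "", fmt.Errorf("pulling %s aborted: %w", image, ctx.Err())
	}
}
```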
@@ -18,6 +18,7 @@ limitations under the License.
package container

import (
"context"
"sync"
"time"
)

@@ -29,12 +30,12 @@ var (

// RuntimeCache is in interface for obtaining cached Pods.
type RuntimeCache interface {
GetPods() ([]*Pod, error)
ForceUpdateIfOlder(time.Time) error
GetPods(context.Context) ([]*Pod, error)
ForceUpdateIfOlder(context.Context, time.Time) error
}

type podsGetter interface {
GetPods(bool) ([]*Pod, error)
GetPods(context.Context, bool) ([]*Pod, error)
}

// NewRuntimeCache creates a container runtime cache.

@@ -60,28 +61,28 @@ type runtimeCache struct {

// GetPods returns the cached pods if they are not outdated; otherwise, it
// retrieves the latest pods and return them.
func (r *runtimeCache) GetPods() ([]*Pod, error) {
func (r *runtimeCache) GetPods(ctx context.Context) ([]*Pod, error) {
r.Lock()
defer r.Unlock()
if time.Since(r.cacheTime) > defaultCachePeriod {
if err := r.updateCache(); err != nil {
if err := r.updateCache(ctx); err != nil {
return nil, err
}
}
return r.pods, nil
}

func (r *runtimeCache) ForceUpdateIfOlder(minExpectedCacheTime time.Time) error {
func (r *runtimeCache) ForceUpdateIfOlder(ctx context.Context, minExpectedCacheTime time.Time) error {
r.Lock()
defer r.Unlock()
if r.cacheTime.Before(minExpectedCacheTime) {
return r.updateCache()
return r.updateCache(ctx)
}
return nil
}

func (r *runtimeCache) updateCache() error {
pods, timestamp, err := r.getPodsWithTimestamp()
func (r *runtimeCache) updateCache(ctx context.Context) error {
pods, timestamp, err := r.getPodsWithTimestamp(ctx)
if err != nil {
return err
}

@@ -90,9 +91,9 @@ func (r *runtimeCache) updateCache() error {
}

// getPodsWithTimestamp records a timestamp and retrieves pods from the getter.
func (r *runtimeCache) getPodsWithTimestamp() ([]*Pod, time.Time, error) {
func (r *runtimeCache) getPodsWithTimestamp(ctx context.Context) ([]*Pod, time.Time, error) {
// Always record the timestamp before getting the pods to avoid stale pods.
timestamp := time.Now()
pods, err := r.getter.GetPods(false)
pods, err := r.getter.GetPods(ctx, false)
return pods, timestamp, err
}
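Callers of the runtime cache now pass the context that ultimately reaches GetPods on the runtime. A short usage sketch under the new signatures; the runtimeCache interface here mirrors the shape above but is a local stand-in, and the one-minute freshness window is illustrative:

```go
// Usage sketch (not from the commit): refreshing and reading a RuntimeCache
// through the context-aware signatures.
package sketch

import (
	"context"
	"fmt"
	"time"
)

type runtimeCache interface {
	GetPods(ctx context.Context) ([]string, error)
	ForceUpdateIfOlder(ctx context.Context, t time.Time) error
}

func refreshAndList(ctx context.Context, cache runtimeCache) ([]string, error) {
	// Ask for data no older than one minute, then read whatever is cached.
	if err := cache.ForceUpdateIfOlder(ctx, time.Now().Add(-time.Minute)); err != nil {
		return nil, fmt.Errorf("refresh runtime cache: %w", err)
	}
	return cache.GetPods(ctx)
}
```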
@@ -16,6 +16,8 @@ limitations under the License.

package container

import "context"

// TestRuntimeCache embeds runtimeCache with some additional methods for testing.
// It must be declared in the container package to have visibility to runtimeCache.
// It cannot be in a "..._test.go" file in order for runtime_cache_test.go to have cross-package visibility to it.

@@ -28,7 +30,7 @@ type TestRuntimeCache struct {
func (r *TestRuntimeCache) UpdateCacheWithLock() error {
r.Lock()
defer r.Unlock()
return r.updateCache()
return r.updateCache(context.Background())
}

// GetCachedPods returns the cached pods.
@@ -17,6 +17,7 @@ limitations under the License.
package container_test

import (
"context"
"reflect"
"testing"
"time"

@@ -37,11 +38,12 @@ func comparePods(t *testing.T, expected []*ctest.FakePod, actual []*Pod) {
}

func TestGetPods(t *testing.T) {
ctx := context.Background()
runtime := &ctest.FakeRuntime{}
expected := []*ctest.FakePod{{Pod: &Pod{ID: "1111"}}, {Pod: &Pod{ID: "2222"}}, {Pod: &Pod{ID: "3333"}}}
runtime.PodList = expected
cache := NewTestRuntimeCache(runtime)
actual, err := cache.GetPods()
actual, err := cache.GetPods(ctx)
if err != nil {
t.Errorf("unexpected error %v", err)
}

@@ -50,6 +52,7 @@ func TestGetPods(t *testing.T) {
}

func TestForceUpdateIfOlder(t *testing.T) {
ctx := context.Background()
runtime := &ctest.FakeRuntime{}
cache := NewTestRuntimeCache(runtime)

@@ -63,12 +66,12 @@ func TestForceUpdateIfOlder(t *testing.T) {
runtime.PodList = newpods

// An older timestamp should not force an update.
cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute))
cache.ForceUpdateIfOlder(ctx, time.Now().Add(-20*time.Minute))
actual := cache.GetCachedPods()
comparePods(t, oldpods, actual)

// A newer timestamp should force an update.
cache.ForceUpdateIfOlder(time.Now().Add(20 * time.Second))
cache.ForceUpdateIfOlder(ctx, time.Now().Add(20*time.Second))
actual = cache.GetCachedPods()
comparePods(t, newpods, actual)
}
@@ -17,6 +17,7 @@ limitations under the License.
package testing

import (
"context"
"time"

"k8s.io/apimachinery/pkg/types"

@@ -32,7 +33,7 @@ func NewFakeCache(runtime container.Runtime) container.Cache {
}

func (c *fakeCache) Get(id types.UID) (*container.PodStatus, error) {
return c.runtime.GetPodStatus(id, "", "")
return c.runtime.GetPodStatus(context.Background(), id, "", "")
}

func (c *fakeCache) GetNewerThan(id types.UID, minTime time.Time) (*container.PodStatus, error) {
@@ -91,7 +91,7 @@ func (fv *FakeVersion) Compare(other string) (int, error) {
}

type podsGetter interface {
GetPods(bool) ([]*kubecontainer.Pod, error)
GetPods(context.Context, bool) ([]*kubecontainer.Pod, error)
}

type FakeRuntimeCache struct {

@@ -102,11 +102,11 @@ func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
return &FakeRuntimeCache{getter}
}

func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) {
return f.getter.GetPods(false)
func (f *FakeRuntimeCache) GetPods(ctx context.Context) ([]*kubecontainer.Pod, error) {
return f.getter.GetPods(ctx, false)
}

func (f *FakeRuntimeCache) ForceUpdateIfOlder(time.Time) error {
func (f *FakeRuntimeCache) ForceUpdateIfOlder(context.Context, time.Time) error {
return nil
}
@@ -132,7 +132,7 @@ func (f *FakeRuntime) ClearCalls() {
}

// UpdatePodCIDR fulfills the cri interface.
func (f *FakeRuntime) UpdatePodCIDR(c string) error {
func (f *FakeRuntime) UpdatePodCIDR(_ context.Context, c string) error {
return nil
}

@@ -179,7 +179,7 @@ func (f *FakeRuntime) Type() string {
return f.RuntimeType
}

func (f *FakeRuntime) Version() (kubecontainer.Version, error) {
func (f *FakeRuntime) Version(_ context.Context) (kubecontainer.Version, error) {
f.Lock()
defer f.Unlock()

@@ -195,7 +195,7 @@ func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
return &FakeVersion{Version: f.APIVersionInfo}, f.Err
}

func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
func (f *FakeRuntime) Status(_ context.Context) (*kubecontainer.RuntimeStatus, error) {
f.Lock()
defer f.Unlock()

@@ -203,7 +203,7 @@ func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) {
return f.RuntimeStatus, f.StatusErr
}

func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
func (f *FakeRuntime) GetPods(_ context.Context, all bool) ([]*kubecontainer.Pod, error) {
f.Lock()
defer f.Unlock()

@@ -222,7 +222,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
return pods, f.Err
}

func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
func (f *FakeRuntime) SyncPod(_ context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
f.Lock()
defer f.Unlock()

@@ -238,7 +238,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Se
return
}

func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
func (f *FakeRuntime) KillPod(_ context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
f.Lock()
defer f.Unlock()

@@ -276,7 +276,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er
return f.Err
}

func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
func (f *FakeRuntime) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
f.Lock()
defer f.Unlock()

@@ -293,7 +293,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container
return f.Err
}

func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (f *FakeRuntime) PullImage(_ context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
f.Lock()
defer f.Unlock()

@@ -308,7 +308,7 @@ func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.
return image.Image, f.Err
}

func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
func (f *FakeRuntime) GetImageRef(_ context.Context, image kubecontainer.ImageSpec) (string, error) {
f.Lock()
defer f.Unlock()

@@ -321,7 +321,7 @@ func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error)
return "", f.InspectErr
}

func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
func (f *FakeRuntime) ListImages(_ context.Context) ([]kubecontainer.Image, error) {
f.Lock()
defer f.Unlock()

@@ -329,7 +329,7 @@ func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) {
return f.ImageList, f.Err
}

func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
func (f *FakeRuntime) RemoveImage(_ context.Context, image kubecontainer.ImageSpec) error {
f.Lock()
defer f.Unlock()

@@ -346,7 +346,7 @@ func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
return f.Err
}

func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
func (f *FakeRuntime) GarbageCollect(_ context.Context, gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
f.Lock()
defer f.Unlock()

@@ -354,7 +354,7 @@ func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool
return f.Err
}

func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error {
func (f *FakeRuntime) DeleteContainer(_ context.Context, containerID kubecontainer.ContainerID) error {
f.Lock()
defer f.Unlock()

@@ -362,7 +362,7 @@ func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) err
return f.Err
}

func (f *FakeRuntime) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error {
func (f *FakeRuntime) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error {
f.Lock()
defer f.Unlock()

@@ -370,7 +370,7 @@ func (f *FakeRuntime) CheckpointContainer(options *runtimeapi.CheckpointContaine
return f.Err
}

func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
func (f *FakeRuntime) ImageStats(_ context.Context) (*kubecontainer.ImageStats, error) {
f.Lock()
defer f.Unlock()

@@ -378,7 +378,7 @@ func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) {
return nil, f.Err
}

func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (f *FakeStreamingRuntime) GetExec(_ context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()

@@ -386,7 +386,7 @@ func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []strin
return &url.URL{Host: FakeHost}, f.Err
}

func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (f *FakeStreamingRuntime) GetAttach(_ context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()

@@ -394,7 +394,7 @@ func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, st
return &url.URL{Host: FakeHost}, f.Err
}

func (f *FakeStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
func (f *FakeStreamingRuntime) GetPortForward(_ context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
f.Lock()
defer f.Unlock()

@@ -414,7 +414,7 @@ type FakeContainerCommandRunner struct {

var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{}

func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
func (f *FakeContainerCommandRunner) RunInContainer(_ context.Context, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
// record invoked values
f.ContainerID = containerID
f.Cmd = cmd
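Note that the test doubles accept the new parameter but discard it (the `_ context.Context` argument), so existing assertions keep working; tests only need to supply some context. A minimal sketch, assuming it sits next to the fakes in the same package:

```go
package testing

import (
	"context"
	"testing"
)

func TestFakeRuntimeIgnoresContext(t *testing.T) {
	fake := &FakeRuntime{}
	// The fake takes the context but ignores it, so any context will do here.
	if _, err := fake.GetPods(context.Background(), true); err != nil {
		t.Fatalf("GetPods: %v", err)
	}
}
```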
@@ -17,7 +17,9 @@ limitations under the License.
package testing

import (
"k8s.io/api/core/v1"
"context"

v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -34,7 +36,7 @@ type FakeRuntimeHelper struct {
Err error
}

func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
func (f *FakeRuntimeHelper) GenerateRunContainerOptions(_ context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
var opts kubecontainer.RunContainerOptions
if len(container.TerminationMessagePath) != 0 {
opts.PodContainerDir = f.PodContainerDir
@@ -21,6 +21,7 @@ limitations under the License.
package testing

import (
context "context"
reflect "reflect"
time "time"

@@ -52,32 +53,32 @@ func (m *MockRuntimeCache) EXPECT() *MockRuntimeCacheMockRecorder {
}

// ForceUpdateIfOlder mocks base method.
func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 time.Time) error {
func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 context.Context, arg1 time.Time) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0)
ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ForceUpdateIfOlder indicates an expected call of ForceUpdateIfOlder.
func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0 interface{}) *gomock.Call {
func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0, arg1)
}

// GetPods mocks base method.
func (m *MockRuntimeCache) GetPods() ([]*container.Pod, error) {
func (m *MockRuntimeCache) GetPods(arg0 context.Context) ([]*container.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPods")
ret := m.ctrl.Call(m, "GetPods", arg0)
ret0, _ := ret[0].([]*container.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetPods indicates an expected call of GetPods.
func (mr *MockRuntimeCacheMockRecorder) GetPods() *gomock.Call {
func (mr *MockRuntimeCacheMockRecorder) GetPods(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods), arg0)
}

// MockpodsGetter is a mock of podsGetter interface.

@@ -104,16 +105,16 @@ func (m *MockpodsGetter) EXPECT() *MockpodsGetterMockRecorder {
}

// GetPods mocks base method.
func (m *MockpodsGetter) GetPods(arg0 bool) ([]*container.Pod, error) {
func (m *MockpodsGetter) GetPods(arg0 context.Context, arg1 bool) ([]*container.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPods", arg0)
ret := m.ctrl.Call(m, "GetPods", arg0, arg1)
ret0, _ := ret[0].([]*container.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetPods indicates an expected call of GetPods.
func (mr *MockpodsGetterMockRecorder) GetPods(arg0 interface{}) *gomock.Call {
func (mr *MockpodsGetterMockRecorder) GetPods(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0, arg1)
}
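Because every mocked method gained a parameter, the regenerated gomock recorders also change, and test expectations need one extra matcher for the context, typically gomock.Any(). A sketch of what that looks like from an external test package; the import paths are assumed from the surrounding files and the mock constructor follows gomock's standard NewMockXxx naming:

```go
package testing_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	container "k8s.io/kubernetes/pkg/kubelet/container"
	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)

func TestRuntimeCacheMockExpectsContext(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockCache := ctest.NewMockRuntimeCache(ctrl)
	// The context argument is matched with gomock.Any(); everything else is unchanged.
	mockCache.EXPECT().GetPods(gomock.Any()).Return([]*container.Pod{}, nil)

	if _, err := mockCache.GetPods(context.Background()); err != nil {
		t.Fatalf("GetPods: %v", err)
	}
}
```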
@@ -127,45 +127,45 @@ func (mr *MockRuntimeMockRecorder) APIVersion() *gomock.Call {
}

// CheckpointContainer mocks base method.
func (m *MockRuntime) CheckpointContainer(options *v10.CheckpointContainerRequest) error {
func (m *MockRuntime) CheckpointContainer(ctx context.Context, options *v10.CheckpointContainerRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckpointContainer", options)
ret := m.ctrl.Call(m, "CheckpointContainer", ctx, options)
ret0, _ := ret[0].(error)
return ret0
}

// CheckpointContainer indicates an expected call of CheckpointContainer.
func (mr *MockRuntimeMockRecorder) CheckpointContainer(options interface{}) *gomock.Call {
func (mr *MockRuntimeMockRecorder) CheckpointContainer(ctx, options interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), options)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), ctx, options)
}

// DeleteContainer mocks base method.
func (m *MockRuntime) DeleteContainer(containerID container.ContainerID) error {
func (m *MockRuntime) DeleteContainer(ctx context.Context, containerID container.ContainerID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteContainer", containerID)
ret := m.ctrl.Call(m, "DeleteContainer", ctx, containerID)
ret0, _ := ret[0].(error)
return ret0
}

// DeleteContainer indicates an expected call of DeleteContainer.
func (mr *MockRuntimeMockRecorder) DeleteContainer(containerID interface{}) *gomock.Call {
func (mr *MockRuntimeMockRecorder) DeleteContainer(ctx, containerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), containerID)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), ctx, containerID)
}

// GarbageCollect mocks base method.
func (m *MockRuntime) GarbageCollect(gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error {
func (m *MockRuntime) GarbageCollect(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GarbageCollect", gcPolicy, allSourcesReady, evictNonDeletedPods)
ret := m.ctrl.Call(m, "GarbageCollect", ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
ret0, _ := ret[0].(error)
return ret0
}

// GarbageCollect indicates an expected call of GarbageCollect.
func (mr *MockRuntimeMockRecorder) GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call {
func (mr *MockRuntimeMockRecorder) GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), gcPolicy, allSourcesReady, evictNonDeletedPods)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
}

// GetContainerLogs mocks base method.
@ -183,150 +183,150 @@ func (mr *MockRuntimeMockRecorder) GetContainerLogs(ctx, pod, containerID, logOp
|
||||
}
|
||||
|
||||
// GetImageRef mocks base method.
|
||||
func (m *MockRuntime) GetImageRef(image container.ImageSpec) (string, error) {
|
||||
func (m *MockRuntime) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetImageRef", image)
|
||||
ret := m.ctrl.Call(m, "GetImageRef", ctx, image)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetImageRef indicates an expected call of GetImageRef.
|
||||
func (mr *MockRuntimeMockRecorder) GetImageRef(image interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), image)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), ctx, image)
|
||||
}
|
||||
|
||||
// GetPodStatus mocks base method.
|
||||
func (m *MockRuntime) GetPodStatus(uid types.UID, name, namespace string) (*container.PodStatus, error) {
|
||||
func (m *MockRuntime) GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*container.PodStatus, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetPodStatus", uid, name, namespace)
|
||||
ret := m.ctrl.Call(m, "GetPodStatus", ctx, uid, name, namespace)
|
||||
ret0, _ := ret[0].(*container.PodStatus)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetPodStatus indicates an expected call of GetPodStatus.
|
||||
func (mr *MockRuntimeMockRecorder) GetPodStatus(uid, name, namespace interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) GetPodStatus(ctx, uid, name, namespace interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), uid, name, namespace)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), ctx, uid, name, namespace)
|
||||
}
|
||||
|
||||
// GetPods mocks base method.
|
||||
func (m *MockRuntime) GetPods(all bool) ([]*container.Pod, error) {
|
||||
func (m *MockRuntime) GetPods(ctx context.Context, all bool) ([]*container.Pod, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetPods", all)
|
||||
ret := m.ctrl.Call(m, "GetPods", ctx, all)
|
||||
ret0, _ := ret[0].([]*container.Pod)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetPods indicates an expected call of GetPods.
|
||||
func (mr *MockRuntimeMockRecorder) GetPods(all interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) GetPods(ctx, all interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), all)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), ctx, all)
|
||||
}
|
||||
|
||||
// ImageStats mocks base method.
|
||||
func (m *MockRuntime) ImageStats() (*container.ImageStats, error) {
|
||||
func (m *MockRuntime) ImageStats(ctx context.Context) (*container.ImageStats, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ImageStats")
|
||||
ret := m.ctrl.Call(m, "ImageStats", ctx)
|
||||
ret0, _ := ret[0].(*container.ImageStats)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ImageStats indicates an expected call of ImageStats.
|
||||
func (mr *MockRuntimeMockRecorder) ImageStats() *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) ImageStats(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats), ctx)
|
||||
}
|
||||
|
||||
// KillPod mocks base method.
|
||||
func (m *MockRuntime) KillPod(pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error {
|
||||
func (m *MockRuntime) KillPod(ctx context.Context, pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "KillPod", pod, runningPod, gracePeriodOverride)
|
||||
ret := m.ctrl.Call(m, "KillPod", ctx, pod, runningPod, gracePeriodOverride)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// KillPod indicates an expected call of KillPod.
|
||||
func (mr *MockRuntimeMockRecorder) KillPod(pod, runningPod, gracePeriodOverride interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) KillPod(ctx, pod, runningPod, gracePeriodOverride interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), pod, runningPod, gracePeriodOverride)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), ctx, pod, runningPod, gracePeriodOverride)
|
||||
}
|
||||
|
||||
// ListImages mocks base method.
|
||||
func (m *MockRuntime) ListImages() ([]container.Image, error) {
|
||||
func (m *MockRuntime) ListImages(ctx context.Context) ([]container.Image, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ListImages")
|
||||
ret := m.ctrl.Call(m, "ListImages", ctx)
|
||||
ret0, _ := ret[0].([]container.Image)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ListImages indicates an expected call of ListImages.
|
||||
func (mr *MockRuntimeMockRecorder) ListImages() *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) ListImages(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages), ctx)
|
||||
}
|
||||
|
||||
// PullImage mocks base method.
|
||||
func (m *MockRuntime) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
|
||||
func (m *MockRuntime) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig)
|
||||
ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// PullImage indicates an expected call of PullImage.
|
||||
func (mr *MockRuntimeMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), image, pullSecrets, podSandboxConfig)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig)
|
||||
}
|
||||
|
||||
// RemoveImage mocks base method.
|
||||
func (m *MockRuntime) RemoveImage(image container.ImageSpec) error {
|
||||
func (m *MockRuntime) RemoveImage(ctx context.Context, image container.ImageSpec) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RemoveImage", image)
|
||||
ret := m.ctrl.Call(m, "RemoveImage", ctx, image)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// RemoveImage indicates an expected call of RemoveImage.
|
||||
func (mr *MockRuntimeMockRecorder) RemoveImage(image interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), image)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), ctx, image)
|
||||
}
|
||||
|
||||
// Status mocks base method.
|
||||
func (m *MockRuntime) Status() (*container.RuntimeStatus, error) {
|
||||
func (m *MockRuntime) Status(ctx context.Context) (*container.RuntimeStatus, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Status")
|
||||
ret := m.ctrl.Call(m, "Status", ctx)
|
||||
ret0, _ := ret[0].(*container.RuntimeStatus)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Status indicates an expected call of Status.
|
||||
func (mr *MockRuntimeMockRecorder) Status() *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) Status(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status), ctx)
|
||||
}
|
||||
|
||||
// SyncPod mocks base method.
|
||||
func (m *MockRuntime) SyncPod(pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult {
|
||||
func (m *MockRuntime) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SyncPod", pod, podStatus, pullSecrets, backOff)
|
||||
ret := m.ctrl.Call(m, "SyncPod", ctx, pod, podStatus, pullSecrets, backOff)
|
||||
ret0, _ := ret[0].(container.PodSyncResult)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SyncPod indicates an expected call of SyncPod.
|
||||
func (mr *MockRuntimeMockRecorder) SyncPod(pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) SyncPod(ctx, pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), pod, podStatus, pullSecrets, backOff)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), ctx, pod, podStatus, pullSecrets, backOff)
|
||||
}
|
||||
|
||||
// Type mocks base method.
|
||||
@ -344,32 +344,32 @@ func (mr *MockRuntimeMockRecorder) Type() *gomock.Call {
|
||||
}
|
||||
|
||||
// UpdatePodCIDR mocks base method.
|
||||
func (m *MockRuntime) UpdatePodCIDR(podCIDR string) error {
|
||||
func (m *MockRuntime) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdatePodCIDR", podCIDR)
|
||||
ret := m.ctrl.Call(m, "UpdatePodCIDR", ctx, podCIDR)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpdatePodCIDR indicates an expected call of UpdatePodCIDR.
|
||||
func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(podCIDR interface{}) *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(ctx, podCIDR interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), podCIDR)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), ctx, podCIDR)
|
||||
}
|
||||
|
||||
// Version mocks base method.
|
||||
func (m *MockRuntime) Version() (container.Version, error) {
|
||||
func (m *MockRuntime) Version(ctx context.Context) (container.Version, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Version")
|
||||
ret := m.ctrl.Call(m, "Version", ctx)
|
||||
ret0, _ := ret[0].(container.Version)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Version indicates an expected call of Version.
|
||||
func (mr *MockRuntimeMockRecorder) Version() *gomock.Call {
|
||||
func (mr *MockRuntimeMockRecorder) Version(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version), ctx)
|
||||
}
|
||||
|
||||
// MockStreamingRuntime is a mock of StreamingRuntime interface.
|
||||
@ -396,48 +396,48 @@ func (m *MockStreamingRuntime) EXPECT() *MockStreamingRuntimeMockRecorder {
}

// GetAttach mocks base method.
func (m *MockStreamingRuntime) GetAttach(id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (m *MockStreamingRuntime) GetAttach(ctx context.Context, id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAttach", id, stdin, stdout, stderr, tty)
ret := m.ctrl.Call(m, "GetAttach", ctx, id, stdin, stdout, stderr, tty)
ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetAttach indicates an expected call of GetAttach.
func (mr *MockStreamingRuntimeMockRecorder) GetAttach(id, stdin, stdout, stderr, tty interface{}) *gomock.Call {
func (mr *MockStreamingRuntimeMockRecorder) GetAttach(ctx, id, stdin, stdout, stderr, tty interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), id, stdin, stdout, stderr, tty)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), ctx, id, stdin, stdout, stderr, tty)
}

// GetExec mocks base method.
func (m *MockStreamingRuntime) GetExec(id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (m *MockStreamingRuntime) GetExec(ctx context.Context, id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExec", id, cmd, stdin, stdout, stderr, tty)
ret := m.ctrl.Call(m, "GetExec", ctx, id, cmd, stdin, stdout, stderr, tty)
ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetExec indicates an expected call of GetExec.
func (mr *MockStreamingRuntimeMockRecorder) GetExec(id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call {
func (mr *MockStreamingRuntimeMockRecorder) GetExec(ctx, id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), id, cmd, stdin, stdout, stderr, tty)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), ctx, id, cmd, stdin, stdout, stderr, tty)
}

// GetPortForward mocks base method.
func (m *MockStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
func (m *MockStreamingRuntime) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPortForward", podName, podNamespace, podUID, ports)
ret := m.ctrl.Call(m, "GetPortForward", ctx, podName, podNamespace, podUID, ports)
ret0, _ := ret[0].(*url.URL)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetPortForward indicates an expected call of GetPortForward.
func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(podName, podNamespace, podUID, ports interface{}) *gomock.Call {
func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(ctx, podName, podNamespace, podUID, ports interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), podName, podNamespace, podUID, ports)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), ctx, podName, podNamespace, podUID, ports)
}

// MockImageService is a mock of ImageService interface.
@ -464,77 +464,77 @@ func (m *MockImageService) EXPECT() *MockImageServiceMockRecorder {
}

// GetImageRef mocks base method.
func (m *MockImageService) GetImageRef(image container.ImageSpec) (string, error) {
func (m *MockImageService) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetImageRef", image)
ret := m.ctrl.Call(m, "GetImageRef", ctx, image)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// GetImageRef indicates an expected call of GetImageRef.
func (mr *MockImageServiceMockRecorder) GetImageRef(image interface{}) *gomock.Call {
func (mr *MockImageServiceMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), image)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), ctx, image)
}

// ImageStats mocks base method.
func (m *MockImageService) ImageStats() (*container.ImageStats, error) {
func (m *MockImageService) ImageStats(ctx context.Context) (*container.ImageStats, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageStats")
ret := m.ctrl.Call(m, "ImageStats", ctx)
ret0, _ := ret[0].(*container.ImageStats)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ImageStats indicates an expected call of ImageStats.
func (mr *MockImageServiceMockRecorder) ImageStats() *gomock.Call {
func (mr *MockImageServiceMockRecorder) ImageStats(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats), ctx)
}

// ListImages mocks base method.
func (m *MockImageService) ListImages() ([]container.Image, error) {
func (m *MockImageService) ListImages(ctx context.Context) ([]container.Image, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListImages")
ret := m.ctrl.Call(m, "ListImages", ctx)
ret0, _ := ret[0].([]container.Image)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ListImages indicates an expected call of ListImages.
func (mr *MockImageServiceMockRecorder) ListImages() *gomock.Call {
func (mr *MockImageServiceMockRecorder) ListImages(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages), ctx)
}

// PullImage mocks base method.
func (m *MockImageService) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
func (m *MockImageService) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig)
ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// PullImage indicates an expected call of PullImage.
func (mr *MockImageServiceMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
func (mr *MockImageServiceMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), image, pullSecrets, podSandboxConfig)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig)
}

// RemoveImage mocks base method.
func (m *MockImageService) RemoveImage(image container.ImageSpec) error {
func (m *MockImageService) RemoveImage(ctx context.Context, image container.ImageSpec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveImage", image)
ret := m.ctrl.Call(m, "RemoveImage", ctx, image)
ret0, _ := ret[0].(error)
return ret0
}

// RemoveImage indicates an expected call of RemoveImage.
func (mr *MockImageServiceMockRecorder) RemoveImage(image interface{}) *gomock.Call {
func (mr *MockImageServiceMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), image)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), ctx, image)
}

// MockAttacher is a mock of Attacher interface.
@ -561,17 +561,17 @@ func (m *MockAttacher) EXPECT() *MockAttacherMockRecorder {
}

// AttachContainer mocks base method.
func (m *MockAttacher) AttachContainer(id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
func (m *MockAttacher) AttachContainer(ctx context.Context, id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AttachContainer", id, stdin, stdout, stderr, tty, resize)
ret := m.ctrl.Call(m, "AttachContainer", ctx, id, stdin, stdout, stderr, tty, resize)
ret0, _ := ret[0].(error)
return ret0
}

// AttachContainer indicates an expected call of AttachContainer.
func (mr *MockAttacherMockRecorder) AttachContainer(id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call {
func (mr *MockAttacherMockRecorder) AttachContainer(ctx, id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), id, stdin, stdout, stderr, tty, resize)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), ctx, id, stdin, stdout, stderr, tty, resize)
}

// MockCommandRunner is a mock of CommandRunner interface.
@ -598,16 +598,16 @@ func (m *MockCommandRunner) EXPECT() *MockCommandRunnerMockRecorder {
}

// RunInContainer mocks base method.
func (m *MockCommandRunner) RunInContainer(id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
func (m *MockCommandRunner) RunInContainer(ctx context.Context, id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunInContainer", id, cmd, timeout)
ret := m.ctrl.Call(m, "RunInContainer", ctx, id, cmd, timeout)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// RunInContainer indicates an expected call of RunInContainer.
func (mr *MockCommandRunnerMockRecorder) RunInContainer(id, cmd, timeout interface{}) *gomock.Call {
func (mr *MockCommandRunnerMockRecorder) RunInContainer(ctx, id, cmd, timeout interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), id, cmd, timeout)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), ctx, id, cmd, timeout)
}

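Because the regenerated mocks above now take a context as their first argument, test expectations have to record that argument as well, typically by matching it with gomock.Any(). The following is a minimal hedged sketch, not part of this commit: the test name, the CIDR value, and the package/import aliases are assumptions made only for illustration.

package kuberuntime_test // hypothetical package, for illustration only

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" // assumed location of the generated mocks
)

// TestUpdatePodCIDRWithContext shows the updated expectation style: the
// recorder now takes ctx first, and gomock.Any() keeps the test independent
// of which concrete context the caller passes.
func TestUpdatePodCIDRWithContext(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockRuntime := containertest.NewMockRuntime(ctrl)
	mockRuntime.EXPECT().UpdatePodCIDR(gomock.Any(), "10.0.0.0/24").Return(nil)

	if err := mockRuntime.UpdatePodCIDR(context.Background(), "10.0.0.0/24"); err != nil {
		t.Fatalf("UpdatePodCIDR returned error: %v", err)
	}
}
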
@ -24,7 +24,7 @@ import (

// ListImages lists existing images.
func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesRequest) (*kubeapi.ListImagesResponse, error) {
images, err := f.ImageService.ListImages(req.Filter)
images, err := f.ImageService.ListImages(ctx, req.Filter)
if err != nil {
return nil, err
}
@ -38,7 +38,7 @@ func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesR
// present, returns a response with ImageStatusResponse.Image set to
// nil.
func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatusRequest) (*kubeapi.ImageStatusResponse, error) {
resp, err := f.ImageService.ImageStatus(req.Image, false)
resp, err := f.ImageService.ImageStatus(ctx, req.Image, false)
if err != nil {
return nil, err
}
@ -48,7 +48,7 @@ func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatu

// PullImage pulls an image with authentication config.
func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageRequest) (*kubeapi.PullImageResponse, error) {
image, err := f.ImageService.PullImage(req.Image, req.Auth, req.SandboxConfig)
image, err := f.ImageService.PullImage(ctx, req.Image, req.Auth, req.SandboxConfig)
if err != nil {
return nil, err
}
@ -62,7 +62,7 @@ func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageReq
// This call is idempotent, and must not return an error if the image has
// already been removed.
func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImageRequest) (*kubeapi.RemoveImageResponse, error) {
err := f.ImageService.RemoveImage(req.Image)
err := f.ImageService.RemoveImage(ctx, req.Image)
if err != nil {
return nil, err
}
@ -72,7 +72,7 @@ func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImag

// ImageFsInfo returns information of the filesystem that is used to store images.
func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *kubeapi.ImageFsInfoRequest) (*kubeapi.ImageFsInfoResponse, error) {
fsUsage, err := f.ImageService.ImageFsInfo()
fsUsage, err := f.ImageService.ImageFsInfo(ctx)
if err != nil {
return nil, err
}

@ -80,13 +80,13 @@ func (f *RemoteRuntime) Stop() {

// Version returns the runtime name, runtime version, and runtime API version.
func (f *RemoteRuntime) Version(ctx context.Context, req *kubeapi.VersionRequest) (*kubeapi.VersionResponse, error) {
return f.RuntimeService.Version(req.Version)
return f.RuntimeService.Version(ctx, req.Version)
}

// RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure
// the sandbox is in the ready state on success.
func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSandboxRequest) (*kubeapi.RunPodSandboxResponse, error) {
sandboxID, err := f.RuntimeService.RunPodSandbox(req.Config, req.RuntimeHandler)
sandboxID, err := f.RuntimeService.RunPodSandbox(ctx, req.Config, req.RuntimeHandler)
if err != nil {
return nil, err
}
@ -99,7 +99,7 @@ func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSa
// If there are any running containers in the sandbox, they must be forcibly
// terminated.
func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPodSandboxRequest) (*kubeapi.StopPodSandboxResponse, error) {
err := f.RuntimeService.StopPodSandbox(req.PodSandboxId)
err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId)
if err != nil {
return nil, err
}
@ -112,7 +112,7 @@ func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPod
// This call is idempotent, and must not return an error if the sandbox has
// already been removed.
func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.RemovePodSandboxRequest) (*kubeapi.RemovePodSandboxResponse, error) {
err := f.RuntimeService.StopPodSandbox(req.PodSandboxId)
err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId)
if err != nil {
return nil, err
}
@ -123,7 +123,7 @@ func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.Remov
// PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not
// present, returns an error.
func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSandboxStatusRequest) (*kubeapi.PodSandboxStatusResponse, error) {
resp, err := f.RuntimeService.PodSandboxStatus(req.PodSandboxId, false)
resp, err := f.RuntimeService.PodSandboxStatus(ctx, req.PodSandboxId, false)
if err != nil {
return nil, err
}
@ -133,7 +133,7 @@ func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSa

// ListPodSandbox returns a list of PodSandboxes.
func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPodSandboxRequest) (*kubeapi.ListPodSandboxResponse, error) {
items, err := f.RuntimeService.ListPodSandbox(req.Filter)
items, err := f.RuntimeService.ListPodSandbox(ctx, req.Filter)
if err != nil {
return nil, err
}
@ -143,7 +143,7 @@ func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPod

// CreateContainer creates a new container in specified PodSandbox
func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.CreateContainerRequest) (*kubeapi.CreateContainerResponse, error) {
containerID, err := f.RuntimeService.CreateContainer(req.PodSandboxId, req.Config, req.SandboxConfig)
containerID, err := f.RuntimeService.CreateContainer(ctx, req.PodSandboxId, req.Config, req.SandboxConfig)
if err != nil {
return nil, err
}
@ -153,7 +153,7 @@ func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.Create

// StartContainer starts the container.
func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartContainerRequest) (*kubeapi.StartContainerResponse, error) {
err := f.RuntimeService.StartContainer(req.ContainerId)
err := f.RuntimeService.StartContainer(ctx, req.ContainerId)
if err != nil {
return nil, err
}
@ -165,7 +165,7 @@ func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartCo
// This call is idempotent, and must not return an error if the container has
// already been stopped.
func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopContainerRequest) (*kubeapi.StopContainerResponse, error) {
err := f.RuntimeService.StopContainer(req.ContainerId, req.Timeout)
err := f.RuntimeService.StopContainer(ctx, req.ContainerId, req.Timeout)
if err != nil {
return nil, err
}
@ -178,7 +178,7 @@ func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopCont
// This call is idempotent, and must not return an error if the container has
// already been removed.
func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.RemoveContainerRequest) (*kubeapi.RemoveContainerResponse, error) {
err := f.RuntimeService.RemoveContainer(req.ContainerId)
err := f.RuntimeService.RemoveContainer(ctx, req.ContainerId)
if err != nil {
return nil, err
}
@ -188,7 +188,7 @@ func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.Remove

// ListContainers lists all containers by filters.
func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListContainersRequest) (*kubeapi.ListContainersResponse, error) {
items, err := f.RuntimeService.ListContainers(req.Filter)
items, err := f.RuntimeService.ListContainers(ctx, req.Filter)
if err != nil {
return nil, err
}
@ -199,7 +199,7 @@ func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListCon
// ContainerStatus returns status of the container. If the container is not
// present, returns an error.
func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.ContainerStatusRequest) (*kubeapi.ContainerStatusResponse, error) {
resp, err := f.RuntimeService.ContainerStatus(req.ContainerId, false)
resp, err := f.RuntimeService.ContainerStatus(ctx, req.ContainerId, false)
if err != nil {
return nil, err
}
@ -210,7 +210,7 @@ func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.Contai
// ExecSync runs a command in a container synchronously.
func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncRequest) (*kubeapi.ExecSyncResponse, error) {
var exitCode int32
stdout, stderr, err := f.RuntimeService.ExecSync(req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second)
stdout, stderr, err := f.RuntimeService.ExecSync(ctx, req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second)
if err != nil {
exitError, ok := err.(utilexec.ExitError)
if !ok {
@ -228,23 +228,23 @@ func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncReque

// Exec prepares a streaming endpoint to execute a command in the container.
func (f *RemoteRuntime) Exec(ctx context.Context, req *kubeapi.ExecRequest) (*kubeapi.ExecResponse, error) {
return f.RuntimeService.Exec(req)
return f.RuntimeService.Exec(ctx, req)
}

// Attach prepares a streaming endpoint to attach to a running container.
func (f *RemoteRuntime) Attach(ctx context.Context, req *kubeapi.AttachRequest) (*kubeapi.AttachResponse, error) {
return f.RuntimeService.Attach(req)
return f.RuntimeService.Attach(ctx, req)
}

// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
func (f *RemoteRuntime) PortForward(ctx context.Context, req *kubeapi.PortForwardRequest) (*kubeapi.PortForwardResponse, error) {
return f.RuntimeService.PortForward(req)
return f.RuntimeService.PortForward(ctx, req)
}

// ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error.
func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.ContainerStatsRequest) (*kubeapi.ContainerStatsResponse, error) {
stats, err := f.RuntimeService.ContainerStats(req.ContainerId)
stats, err := f.RuntimeService.ContainerStats(ctx, req.ContainerId)
if err != nil {
return nil, err
}
@ -254,7 +254,7 @@ func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.Contain

// ListContainerStats returns stats of all running containers.
func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.ListContainerStatsRequest) (*kubeapi.ListContainerStatsResponse, error) {
stats, err := f.RuntimeService.ListContainerStats(req.Filter)
stats, err := f.RuntimeService.ListContainerStats(ctx, req.Filter)
if err != nil {
return nil, err
}
@ -265,7 +265,7 @@ func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.Lis
// PodSandboxStats returns stats of the pod. If the pod does not
// exist, the call returns an error.
func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSandboxStatsRequest) (*kubeapi.PodSandboxStatsResponse, error) {
stats, err := f.RuntimeService.PodSandboxStats(req.PodSandboxId)
stats, err := f.RuntimeService.PodSandboxStats(ctx, req.PodSandboxId)
if err != nil {
return nil, err
}
@ -275,7 +275,7 @@ func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSan

// ListPodSandboxStats returns stats of all running pods.
func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.ListPodSandboxStatsRequest) (*kubeapi.ListPodSandboxStatsResponse, error) {
stats, err := f.RuntimeService.ListPodSandboxStats(req.Filter)
stats, err := f.RuntimeService.ListPodSandboxStats(ctx, req.Filter)
if err != nil {
return nil, err
}
@ -285,7 +285,7 @@ func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.Li

// UpdateRuntimeConfig updates the runtime configuration based on the given request.
func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.UpdateRuntimeConfigRequest) (*kubeapi.UpdateRuntimeConfigResponse, error) {
err := f.RuntimeService.UpdateRuntimeConfig(req.RuntimeConfig)
err := f.RuntimeService.UpdateRuntimeConfig(ctx, req.RuntimeConfig)
if err != nil {
return nil, err
}
@ -295,7 +295,7 @@ func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.Up

// Status returns the status of the runtime.
func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest) (*kubeapi.StatusResponse, error) {
resp, err := f.RuntimeService.Status(false)
resp, err := f.RuntimeService.Status(ctx, false)
if err != nil {
return nil, err
}
@ -305,7 +305,7 @@ func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest)

// UpdateContainerResources updates ContainerConfig of the container.
func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubeapi.UpdateContainerResourcesRequest) (*kubeapi.UpdateContainerResourcesResponse, error) {
err := f.RuntimeService.UpdateContainerResources(req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux})
err := f.RuntimeService.UpdateContainerResources(ctx, req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux})
if err != nil {
return nil, err
}
@ -315,7 +315,7 @@ func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubea

// ReopenContainerLog reopens the container log file.
func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.ReopenContainerLogRequest) (*kubeapi.ReopenContainerLogResponse, error) {
err := f.RuntimeService.ReopenContainerLog(req.ContainerId)
err := f.RuntimeService.ReopenContainerLog(ctx, req.ContainerId)
if err != nil {
return nil, err
}
@ -325,7 +325,7 @@ func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.Reo

// CheckpointContainer checkpoints the given container.
func (f *RemoteRuntime) CheckpointContainer(ctx context.Context, req *kubeapi.CheckpointContainerRequest) (*kubeapi.CheckpointContainerResponse, error) {
err := f.RuntimeService.CheckpointContainer(&kubeapi.CheckpointContainerRequest{})
err := f.RuntimeService.CheckpointContainer(ctx, &kubeapi.CheckpointContainerRequest{})
if err != nil {
return nil, err
}

@ -79,8 +79,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp
}

service := &remoteImageService{timeout: connectionTimeout}

if err := service.validateServiceConnection(conn, endpoint); err != nil {
if err := service.validateServiceConnection(ctx, conn, endpoint); err != nil {
return nil, fmt.Errorf("validate service connection: %w", err)
}

@ -90,10 +89,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp

// validateServiceConnection tries to connect to the remote image service by
// using the CRI v1 API version and fails if that's not possible.
func (r *remoteImageService) validateServiceConnection(conn *grpc.ClientConn, endpoint string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()

func (r *remoteImageService) validateServiceConnection(ctx context.Context, conn *grpc.ClientConn, endpoint string) error {
klog.V(4).InfoS("Validating the CRI v1 API image version")
r.imageClient = runtimeapi.NewImageServiceClient(conn)

@ -108,8 +104,8 @@ func (r *remoteImageService) validateServiceConnection(conn *grpc.ClientConn, en
}

// ListImages lists available images.
func (r *remoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
func (r *remoteImageService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.listImagesV1(ctx, filter)
@ -128,8 +124,8 @@ func (r *remoteImageService) listImagesV1(ctx context.Context, filter *runtimeap
}

// ImageStatus returns the status of the image.
func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
func (r *remoteImageService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.imageStatusV1(ctx, image, verbose)
@ -158,8 +154,8 @@ func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeap
}

// PullImage pulls an image with authentication config.
func (r *remoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := getContextWithCancel()
func (r *remoteImageService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

return r.pullImageV1(ctx, image, auth, podSandboxConfig)
@ -186,8 +182,8 @@ func (r *remoteImageService) pullImageV1(ctx context.Context, image *runtimeapi.
}

// RemoveImage removes the image.
func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) (err error) {
ctx, cancel := getContextWithTimeout(r.timeout)
func (r *remoteImageService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) (err error) {
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err = r.imageClient.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{
@ -201,10 +197,10 @@ func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) (err error
}

// ImageFsInfo returns information of the filesystem that is used to store images.
func (r *remoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
func (r *remoteImageService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) {
// Do not set timeout, because `ImageFsInfo` takes time.
// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?
ctx, cancel := getContextWithCancel()
ctx, cancel := context.WithCancel(ctx)
defer cancel()

return r.imageFsInfoV1(ctx)

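The pattern repeated throughout the image service client above is the same: instead of minting a fresh context from context.Background() via the old helpers, each call now derives a bounded child from the caller's context, so cancellation from the kubelet propagates into the gRPC call. A standalone sketch of that pattern under assumed names (callWithTimeout is not part of this diff, it only illustrates the idea):

package main

import (
	"context"
	"fmt"
	"time"
)

// callWithTimeout derives a per-call context from the caller's ctx rather
// than context.Background(), so cancelling the parent also cancels the call.
func callWithTimeout(ctx context.Context, timeout time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return call(ctx)
}

func main() {
	parent, cancel := context.WithCancel(context.Background())
	cancel() // simulate the kubelet abandoning the operation

	err := callWithTimeout(parent, 2*time.Minute, func(ctx context.Context) error {
		return ctx.Err() // a real gRPC call would observe the same cancellation
	})
	fmt.Println(err) // prints "context canceled"
}
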
@ -66,7 +66,7 @@ func TestImageServiceSpansWithTP(t *testing.T) {
)
ctx := context.Background()
imgSvc := createRemoteImageServiceWithTracerProvider(endpoint, tp, t)
imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, "busybox", imgRef)
require.NoError(t, err)
@ -93,7 +93,7 @@ func TestImageServiceSpansWithoutTP(t *testing.T) {
)
ctx := context.Background()
imgSvc := createRemoteImageServiceWithoutTracerProvider(endpoint, t)
imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, "busybox", imgRef)
require.NoError(t, err)

@ -103,7 +103,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t
logReduction: logreduction.NewLogReduction(identicalErrorDelay),
}

if err := service.validateServiceConnection(conn, endpoint); err != nil {
if err := service.validateServiceConnection(ctx, conn, endpoint); err != nil {
return nil, fmt.Errorf("validate service connection: %w", err)
}

@ -112,10 +112,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t

// validateServiceConnection tries to connect to the remote runtime service by
// using the CRI v1 API version and fails if that's not possible.
func (r *remoteRuntimeService) validateServiceConnection(conn *grpc.ClientConn, endpoint string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()

func (r *remoteRuntimeService) validateServiceConnection(ctx context.Context, conn *grpc.ClientConn, endpoint string) error {
klog.V(4).InfoS("Validating the CRI v1 API runtime version")
r.runtimeClient = runtimeapi.NewRuntimeServiceClient(conn)

@ -130,10 +127,10 @@ func (r *remoteRuntimeService) validateServiceConnection(conn *grpc.ClientConn,
}

// Version returns the runtime name, runtime version and runtime API version.
func (r *remoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
func (r *remoteRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Version", "apiVersion", apiVersion, "timeout", r.timeout)

ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.versionV1(ctx, apiVersion)
@ -159,14 +156,14 @@ func (r *remoteRuntimeService) versionV1(ctx context.Context, apiVersion string)

// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state.
func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
func (r *remoteRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
// Use 2 times longer timeout for sandbox operation (4 mins by default)
// TODO: Make the pod sandbox timeout configurable.
timeout := r.timeout * 2

klog.V(10).InfoS("[RemoteRuntimeService] RunPodSandbox", "config", config, "runtimeHandler", runtimeHandler, "timeout", timeout)

ctx, cancel := getContextWithTimeout(timeout)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{
@ -195,10 +192,10 @@ func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig

// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be forced to termination.
func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) {
func (r *remoteRuntimeService) StopPodSandbox(ctx context.Context, podSandBoxID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] StopPodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout)

ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeapi.StopPodSandboxRequest{
@ -215,9 +212,9 @@ func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) {

// RemovePodSandbox removes the sandbox. If there are any containers in the
// sandbox, they should be forcibly removed.
func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error) {
func (r *remoteRuntimeService) RemovePodSandbox(ctx context.Context, podSandBoxID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] RemovePodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeapi.RemovePodSandboxRequest{
@ -233,9 +230,9 @@ func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error)
}

// PodSandboxStatus returns the status of the PodSandbox.
func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
func (r *remoteRuntimeService) PodSandboxStatus(ctx context.Context, podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus", "podSandboxID", podSandBoxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.podSandboxStatusV1(ctx, podSandBoxID, verbose)
@ -263,9 +260,9 @@ func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBo
}

// ListPodSandbox returns a list of PodSandboxes.
func (r *remoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
func (r *remoteRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox", "filter", filter, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.listPodSandboxV1(ctx, filter)
@ -286,9 +283,9 @@ func (r *remoteRuntimeService) listPodSandboxV1(ctx context.Context, filter *run
}

// CreateContainer creates a new container in the specified PodSandbox.
func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (r *remoteRuntimeService) CreateContainer(ctx context.Context, podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.createContainerV1(ctx, podSandBoxID, config, sandboxConfig)
@ -317,9 +314,9 @@ func (r *remoteRuntimeService) createContainerV1(ctx context.Context, podSandBox
}

// StartContainer starts the container.
func (r *remoteRuntimeService) StartContainer(containerID string) (err error) {
func (r *remoteRuntimeService) StartContainer(ctx context.Context, containerID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] StartContainer", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err := r.runtimeClient.StartContainer(ctx, &runtimeapi.StartContainerRequest{
@ -334,12 +331,12 @@ func (r *remoteRuntimeService) StartContainer(containerID string) (err error) {
}

// StopContainer stops a running container with a grace period (i.e., timeout).
func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) (err error) {
func (r *remoteRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] StopContainer", "containerID", containerID, "timeout", timeout)
// Use timeout + default timeout (2 minutes) as timeout to leave extra time
// for SIGKILL container and request latency.
t := r.timeout + time.Duration(timeout)*time.Second
ctx, cancel := getContextWithTimeout(t)
ctx, cancel := context.WithTimeout(ctx, t)
defer cancel()

r.logReduction.ClearID(containerID)
@ -358,9 +355,9 @@ func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64)

// RemoveContainer removes the container. If the container is running, the container
// should be forced to removal.
func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) {
func (r *remoteRuntimeService) RemoveContainer(ctx context.Context, containerID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] RemoveContainer", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

r.logReduction.ClearID(containerID)
@ -376,9 +373,9 @@ func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) {
}

// ListContainers lists containers by filters.
func (r *remoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
func (r *remoteRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListContainers", "filter", filter, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.listContainersV1(ctx, filter)
@ -398,9 +395,9 @@ func (r *remoteRuntimeService) listContainersV1(ctx context.Context, filter *run
}

// ContainerStatus returns the container status.
func (r *remoteRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
func (r *remoteRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.containerStatusV1(ctx, containerID, verbose)
@ -433,9 +430,9 @@ func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerI
}

// UpdateContainerResources updates a containers resource config
func (r *remoteRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) (err error) {
func (r *remoteRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] UpdateContainerResources", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{
@ -453,17 +450,16 @@ func (r *remoteRuntimeService) UpdateContainerResources(containerID string, reso

// ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned.
func (r *remoteRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
func (r *remoteRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
klog.V(10).InfoS("[RemoteRuntimeService] ExecSync", "containerID", containerID, "timeout", timeout)
// Do not set timeout when timeout is 0.
var ctx context.Context
var cancel context.CancelFunc
if timeout != 0 {
// Use timeout + default timeout (2 minutes) as timeout to leave some time for
// the runtime to do cleanup.
ctx, cancel = getContextWithTimeout(r.timeout + timeout)
ctx, cancel = context.WithTimeout(ctx, r.timeout+timeout)
} else {
ctx, cancel = getContextWithCancel()
ctx, cancel = context.WithCancel(ctx)
}
defer cancel()

@ -502,9 +498,9 @@ func (r *remoteRuntimeService) execSyncV1(ctx context.Context, containerID strin
}

// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (r *remoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
func (r *remoteRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Exec", "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.execV1(ctx, req)
@ -529,9 +525,9 @@ func (r *remoteRuntimeService) execV1(ctx context.Context, req *runtimeapi.ExecR
}

// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (r *remoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
func (r *remoteRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Attach", "containerID", req.ContainerId, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.attachV1(ctx, req)
@ -555,9 +551,9 @@ func (r *remoteRuntimeService) attachV1(ctx context.Context, req *runtimeapi.Att
}

// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (r *remoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
func (r *remoteRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PortForward", "podSandboxID", req.PodSandboxId, "port", req.Port, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.portForwardV1(ctx, req)
@ -584,9 +580,9 @@ func (r *remoteRuntimeService) portForwardV1(ctx context.Context, req *runtimeap
// UpdateRuntimeConfig updates the config of a runtime service. The only
// update payload currently supported is the pod CIDR assigned to a node,
// and the runtime service just proxies it down to the network plugin.
func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
func (r *remoteRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] UpdateRuntimeConfig", "runtimeConfig", runtimeConfig, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

// Response doesn't contain anything of interest. This translates to an
@ -603,9 +599,9 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.Run
}

// Status returns the status of the runtime.
func (r *remoteRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
func (r *remoteRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.statusV1(ctx, verbose)
@ -633,9 +629,9 @@ func (r *remoteRuntimeService) statusV1(ctx context.Context, verbose bool) (*run
}

// ContainerStats returns the stats of the container.
func (r *remoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
func (r *remoteRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.containerStatsV1(ctx, containerID)
@ -658,11 +654,11 @@ func (r *remoteRuntimeService) containerStatsV1(ctx context.Context, containerID
}

// ListContainerStats returns the list of ContainerStats given the filter.
func (r *remoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter)
// Do not set timeout, because writable layer stats collection takes time.
// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?
ctx, cancel := getContextWithCancel()
ctx, cancel := context.WithCancel(ctx)
defer cancel()

return r.listContainerStatsV1(ctx, filter)
@ -682,9 +678,9 @@ func (r *remoteRuntimeService) listContainerStatsV1(ctx context.Context, filter
}

// PodSandboxStats returns the stats of the pod.
func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
func (r *remoteRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStats", "podSandboxID", podSandboxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.podSandboxStatsV1(ctx, podSandboxID)
@ -707,10 +703,10 @@ func (r *remoteRuntimeService) podSandboxStatsV1(ctx context.Context, podSandbox
}

// ListPodSandboxStats returns the list of pod sandbox stats given the filter
func (r *remoteRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
func (r *remoteRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats", "filter", filter)
// Set timeout, because runtimes are able to cache disk stats results
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

return r.listPodSandboxStatsV1(ctx, filter)
@ -730,9 +726,9 @@ func (r *remoteRuntimeService) listPodSandboxStatsV1(ctx context.Context, filter
}

// ReopenContainerLog reopens the container log file.
func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error) {
func (r *remoteRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) (err error) {
klog.V(10).InfoS("[RemoteRuntimeService] ReopenContainerLog", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
ctx, cancel := context.WithTimeout(ctx, r.timeout)
defer cancel()

if _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ContainerId: containerID}); err != nil {
@ -745,7 +741,7 @@ func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error
}

// CheckpointContainer triggers a checkpoint of the given CheckpointContainerRequest
func (r *remoteRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error {
func (r *remoteRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
klog.V(10).InfoS(
"[RemoteRuntimeService] CheckpointContainer",
"options",
@ -758,18 +754,18 @@ func (r *remoteRuntimeService) CheckpointContainer(options *runtimeapi.Checkpoin
return errors.New("CheckpointContainer requires the timeout value to be > 0")
}

ctx, cancel := func() (context.Context, context.CancelFunc) {
ctx, cancel := func(ctx context.Context) (context.Context, context.CancelFunc) {
defaultTimeout := int64(r.timeout / time.Second)
if options.Timeout > defaultTimeout {
// The user requested a specific timeout, let's use that if it
// is larger than the CRI default.
return getContextWithTimeout(time.Duration(options.Timeout) * time.Second)
return context.WithTimeout(ctx, time.Duration(options.Timeout)*time.Second)
}
// If the user requested a timeout less than the
// CRI default, let's use the CRI default.
options.Timeout = defaultTimeout
return getContextWithTimeout(r.timeout)
}()
return context.WithTimeout(ctx, r.timeout)
}(ctx)
defer cancel()

_, err := r.runtimeClient.CheckpointContainer(

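The anonymous function in CheckpointContainer now also receives the caller's context, but the deadline it picks is unchanged: the larger of the user-requested checkpoint timeout and the CRI default. A small sketch of that selection in isolation, under assumed names (checkpointTimeout is invented for illustration and is not kubelet code):

package main

import (
	"fmt"
	"time"
)

// checkpointTimeout mirrors the timeout selection above: take the requested
// timeout when it exceeds the CRI default, otherwise fall back to the default.
func checkpointTimeout(requestedSeconds int64, criDefault time.Duration) time.Duration {
	if requested := time.Duration(requestedSeconds) * time.Second; requested > criDefault {
		return requested
	}
	return criDefault
}

func main() {
	fmt.Println(checkpointTimeout(10, 2*time.Minute))  // 2m0s, default wins
	fmt.Println(checkpointTimeout(600, 2*time.Minute)) // 10m0s, request wins
}
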
@ -87,7 +87,7 @@ func TestGetSpans(t *testing.T) {
)
ctx := context.Background()
rtSvc := createRemoteRuntimeServiceWithTracerProvider(endpoint, tp, t)
_, err := rtSvc.Version(apitest.FakeVersion)
_, err := rtSvc.Version(ctx, apitest.FakeVersion)
require.NoError(t, err)
err = tp.ForceFlush(ctx)
require.NoError(t, err)
@ -106,8 +106,9 @@ func TestVersion(t *testing.T) {
}
}()

ctx := context.Background()
rtSvc := createRemoteRuntimeService(endpoint, t)
version, err := rtSvc.Version(apitest.FakeVersion)
version, err := rtSvc.Version(ctx, apitest.FakeVersion)
require.NoError(t, err)
assert.Equal(t, apitest.FakeVersion, version.Version)
assert.Equal(t, apitest.FakeRuntimeName, version.RuntimeName)

@ -17,9 +17,7 @@ limitations under the License.
package remote

import (
"context"
"fmt"
"time"

runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
@ -28,16 +26,6 @@ import (
// grpc library default is 4MB
const maxMsgSize = 1024 * 1024 * 16

// getContextWithTimeout returns a context with timeout.
func getContextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), timeout)
}

// getContextWithCancel returns a context with cancel.
func getContextWithCancel() (context.Context, context.CancelFunc) {
return context.WithCancel(context.Background())
}

// verifySandboxStatus verified whether all required fields are set in PodSandboxStatus.
func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error {
if status.Id == "" {

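The two helpers deleted here always rooted their contexts at context.Background(), so a caller that was itself cancelled could never cut an in-flight CRI call short; deriving from the caller's context fixes that. A minimal sketch of the difference, with illustrative names only (oldStyle stands in for the removed helper, it is not the kubelet's code):

package main

import (
	"context"
	"fmt"
	"time"
)

// oldStyle mimics the removed getContextWithTimeout: the deadline applies,
// but cancellation of the caller's context is invisible to the call.
func oldStyle(timeout time.Duration) (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), timeout)
}

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	cancelParent() // the caller gives up

	oldCtx, c1 := oldStyle(time.Second)
	defer c1()
	fmt.Println(oldCtx.Err()) // <nil>: the Background()-rooted context keeps running

	newCtx, c2 := context.WithTimeout(parent, time.Second) // the new pattern
	defer c2()
	fmt.Println(newCtx.Err()) // context canceled: cancellation propagates
}
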
@ -17,6 +17,7 @@ limitations under the License.
package portforward

import (
"context"
"errors"
"fmt"
"net/http"
@ -240,6 +241,7 @@ Loop:
// portForward invokes the httpStreamHandler's forwarder.PortForward
// function for the given stream pair.
func (h *httpStreamHandler) portForward(p *httpStreamPair) {
ctx := context.Background()
defer p.dataStream.Close()
defer p.errorStream.Close()

@ -247,7 +249,7 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) {
port, _ := strconv.ParseInt(portString, 10, 32)

klog.V(5).InfoS("Connection request invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString)
err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream)
err := h.forwarder.PortForward(ctx, h.pod, h.uid, int32(port), p.dataStream)
klog.V(5).InfoS("Connection request done invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString)

if err != nil {

@ -17,6 +17,7 @@ limitations under the License.
|
||||
package portforward
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
@ -30,7 +31,7 @@ import (
|
||||
// in a pod.
|
||||
type PortForwarder interface {
|
||||
// PortForward copies data between a data stream and a port in a pod.
|
||||
PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error
|
||||
PortForward(ctx context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error
|
||||
}
|
||||
|
||||
// ServePortForward handles a port forwarding request. A single request is
|
||||
|
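Implementations of the PortForwarder interface now receive the request-scoped context as their first argument and can stop copying data once it is cancelled. A minimal sketch of such an implementation; loopbackForwarder and the package name are illustrative only:

package portforwardexample

import (
	"context"
	"io"

	"k8s.io/apimachinery/pkg/types"
)

// loopbackForwarder echoes the stream back to the caller; it exists only to
// show the context-aware PortForward signature, not real forwarding logic.
type loopbackForwarder struct{}

func (loopbackForwarder) PortForward(ctx context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error {
	defer stream.Close()
	done := make(chan error, 1)
	go func() {
		_, err := io.Copy(stream, stream) // echo until the client closes the stream
		done <- err
	}()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err() // the request was cancelled; stop forwarding
	}
}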
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package portforward
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -182,11 +183,12 @@ func (h *websocketStreamHandler) run() {
|
||||
}
|
||||
|
||||
func (h *websocketStreamHandler) portForward(p *websocketStreamPair) {
|
||||
ctx := context.Background()
|
||||
defer p.dataStream.Close()
|
||||
defer p.errorStream.Close()
|
||||
|
||||
klog.V(5).InfoS("Connection invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port)
|
||||
err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream)
|
||||
err := h.forwarder.PortForward(ctx, h.pod, h.uid, p.port, p.dataStream)
|
||||
klog.V(5).InfoS("Connection done invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port)
|
||||
|
||||
if err != nil {
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package remotecommand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@ -33,7 +34,7 @@ import (
|
||||
type Attacher interface {
|
||||
// AttachContainer attaches to the running container in the pod, copying data between in/out/err
|
||||
// and the container's stdin/stdout/stderr.
|
||||
AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
AttachContainer(ctx context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
}
|
||||
|
||||
// ServeAttach handles requests to attach to a container. After creating/receiving the required
|
||||
@ -46,7 +47,7 @@ func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, po
|
||||
}
|
||||
defer ctx.conn.Close()
|
||||
|
||||
err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan)
|
||||
err := attacher.AttachContainer(req.Context(), podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error attaching to container: %v", err)
|
||||
runtime.HandleError(err)
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package remotecommand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@ -35,7 +36,7 @@ import (
|
||||
type Executor interface {
|
||||
// ExecInContainer executes a command in a container in the pod, copying data
|
||||
// between in/out/err and the container's stdin/stdout/stderr.
|
||||
ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
|
||||
ExecInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
|
||||
}
|
||||
|
||||
// ServeExec handles requests to execute a command in a container. After
|
||||
@ -49,7 +50,7 @@ func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podN
|
||||
}
|
||||
defer ctx.conn.Close()
|
||||
|
||||
err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0)
|
||||
err := executor.ExecInContainer(req.Context(), podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0)
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() {
|
||||
rc := exitErr.ExitStatus()
|
||||
|
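Because ServeAttach and ServeExec now hand req.Context() to the Attacher and Executor, a client disconnect or server-side timeout surfaces to the runtime as context cancellation. A hedged stub showing how an Executor can observe that; waitExecutor and the package name are illustrative:

package execexample

import (
	"context"
	"io"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/remotecommand"
)

// waitExecutor blocks until the request context ends, demonstrating that the
// exec call is now tied to the lifetime of the HTTP request.
type waitExecutor struct{}

func (waitExecutor) ExecInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
	// A real runtime would start the process here; this stub only waits for
	// the request to be cancelled or to time out.
	<-ctx.Done()
	return ctx.Err()
}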
@ -70,9 +70,9 @@ func NewOptions(req *http.Request) (*Options, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// context contains the connection and streams used when
|
||||
// connectionContext contains the connection and streams used when
|
||||
// forwarding an attach or execute session into a container.
|
||||
type context struct {
|
||||
type connectionContext struct {
|
||||
conn io.Closer
|
||||
stdinStream io.ReadCloser
|
||||
stdoutStream io.WriteCloser
|
||||
@ -102,8 +102,8 @@ func waitStreamReply(replySent <-chan struct{}, notify chan<- struct{}, stop <-c
|
||||
}
|
||||
}
|
||||
|
||||
func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) {
|
||||
var ctx *context
|
||||
func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) {
|
||||
var ctx *connectionContext
|
||||
var ok bool
|
||||
if wsstream.IsWebSocketRequest(req) {
|
||||
ctx, ok = createWebSocketStreams(req, w, opts, idleTimeout)
|
||||
@ -122,7 +122,7 @@ func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supp
|
||||
return ctx, true
|
||||
}
|
||||
|
||||
func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) {
|
||||
func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) {
|
||||
protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
@ -194,7 +194,7 @@ func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt
|
||||
type protocolHandler interface {
|
||||
// waitForStreams waits for the expected streams or a timeout, returning a
|
||||
// connectionContext if all the streams were received, or an error if not.
|
||||
waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error)
|
||||
waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error)
|
||||
// supportsTerminalResizing returns true if the protocol handler supports terminal resizing
|
||||
supportsTerminalResizing() bool
|
||||
}
|
||||
@ -204,8 +204,8 @@ type protocolHandler interface {
|
||||
// the process' exit code.
|
||||
type v4ProtocolHandler struct{}
|
||||
|
||||
func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) {
|
||||
ctx := &context{}
|
||||
func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
|
||||
ctx := &connectionContext{}
|
||||
receivedStreams := 0
|
||||
replyChan := make(chan struct{})
|
||||
stop := make(chan struct{})
|
||||
@ -255,8 +255,8 @@ func (*v4ProtocolHandler) supportsTerminalResizing() bool { return true }
|
||||
// v3ProtocolHandler implements the V3 protocol version for streaming command execution.
|
||||
type v3ProtocolHandler struct{}
|
||||
|
||||
func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) {
|
||||
ctx := &context{}
|
||||
func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
|
||||
ctx := &connectionContext{}
|
||||
receivedStreams := 0
|
||||
replyChan := make(chan struct{})
|
||||
stop := make(chan struct{})
|
||||
@ -306,8 +306,8 @@ func (*v3ProtocolHandler) supportsTerminalResizing() bool { return true }
|
||||
// v2ProtocolHandler implements the V2 protocol version for streaming command execution.
|
||||
type v2ProtocolHandler struct{}
|
||||
|
||||
func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) {
|
||||
ctx := &context{}
|
||||
func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
|
||||
ctx := &connectionContext{}
|
||||
receivedStreams := 0
|
||||
replyChan := make(chan struct{})
|
||||
stop := make(chan struct{})
|
||||
@ -354,8 +354,8 @@ func (*v2ProtocolHandler) supportsTerminalResizing() bool { return false }
|
||||
// v1ProtocolHandler implements the V1 protocol version for streaming command execution.
|
||||
type v1ProtocolHandler struct{}
|
||||
|
||||
func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) {
|
||||
ctx := &context{}
|
||||
func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) {
|
||||
ctx := &connectionContext{}
|
||||
receivedStreams := 0
|
||||
replyChan := make(chan struct{})
|
||||
stop := make(chan struct{})
|
||||
|
@ -68,9 +68,9 @@ func writeChannel(real bool) wsstream.ChannelType {
|
||||
return wsstream.IgnoreChannel
|
||||
}
|
||||
|
||||
// createWebSocketStreams returns a context containing the websocket connection and
|
||||
// createWebSocketStreams returns a connectionContext containing the websocket connection and
|
||||
// streams needed to perform an exec or an attach.
|
||||
func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*context, bool) {
|
||||
func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*connectionContext, bool) {
|
||||
channels := createChannels(opts)
|
||||
conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{
|
||||
"": {
|
||||
@ -112,7 +112,7 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Opti
|
||||
streams[errorChannel].Write([]byte{})
|
||||
}
|
||||
|
||||
ctx := &context{
|
||||
ctx := &connectionContext{
|
||||
conn: conn,
|
||||
stdinStream: streams[stdinChannel],
|
||||
stdoutStream: streams[stdoutChannel],
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package streaming
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
@ -61,9 +62,9 @@ type Server interface {
|
||||
|
||||
// Runtime is the interface to execute the commands and provide the streams.
|
||||
type Runtime interface {
|
||||
Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error
|
||||
Exec(ctx context.Context, containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
Attach(ctx context.Context, containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
PortForward(ctx context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error
|
||||
}
|
||||
|
||||
// Config defines the options used for running the stream server.
|
||||
@ -369,14 +370,14 @@ var _ remotecommandserver.Executor = &criAdapter{}
|
||||
var _ remotecommandserver.Attacher = &criAdapter{}
|
||||
var _ portforward.PortForwarder = &criAdapter{}
|
||||
|
||||
func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
|
||||
return a.Runtime.Exec(container, cmd, in, out, err, tty, resize)
|
||||
func (a *criAdapter) ExecInContainer(ctx context.Context, podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
|
||||
return a.Runtime.Exec(ctx, container, cmd, in, out, err, tty, resize)
|
||||
}
|
||||
|
||||
func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
return a.Runtime.Attach(container, in, out, err, tty, resize)
|
||||
func (a *criAdapter) AttachContainer(ctx context.Context, podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
return a.Runtime.Attach(ctx, container, in, out, err, tty, resize)
|
||||
}
|
||||
|
||||
func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error {
|
||||
return a.Runtime.PortForward(podName, port, stream)
|
||||
func (a *criAdapter) PortForward(ctx context.Context, podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error {
|
||||
return a.Runtime.PortForward(ctx, podName, port, stream)
|
||||
}
|
||||
|
@ -414,19 +414,19 @@ type fakeRuntime struct {
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
assert.Equal(f.t, testContainerID, containerID)
|
||||
doServerStreams(f.t, "exec", stdin, stdout, stderr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
assert.Equal(f.t, testContainerID, containerID)
|
||||
doServerStreams(f.t, "attach", stdin, stdout, stderr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
assert.Equal(f.t, testPodSandboxID, podSandboxID)
|
||||
assert.EqualValues(f.t, testPort, port)
|
||||
doServerStreams(f.t, "portforward", stream, stream, nil)
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package eviction
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
@ -230,6 +231,7 @@ func (m *managerImpl) IsUnderPIDPressure() bool {
|
||||
// synchronize is the main control loop that enforces eviction thresholds.
|
||||
// Returns the pods that were killed, or nil if no pod was killed.
|
||||
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod {
|
||||
ctx := context.Background()
|
||||
// if we have nothing to do, just return
|
||||
thresholds := m.config.Thresholds
|
||||
if len(thresholds) == 0 && !m.localStorageCapacityIsolation {
|
||||
@ -240,7 +242,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
|
||||
// build the ranking functions (if not yet known)
|
||||
// TODO: have a function in cadvisor that lets us know if global housekeeping has completed
|
||||
if m.dedicatedImageFs == nil {
|
||||
hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs()
|
||||
hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs(ctx)
|
||||
if ok != nil {
|
||||
return nil
|
||||
}
|
||||
@ -251,7 +253,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
|
||||
|
||||
activePods := podFunc()
|
||||
updateStats := true
|
||||
summary, err := m.summaryProvider.Get(updateStats)
|
||||
summary, err := m.summaryProvider.Get(ctx, updateStats)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Eviction manager: failed to get summary stats")
|
||||
return nil
|
||||
@ -343,7 +345,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
|
||||
m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
|
||||
|
||||
// check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods.
|
||||
if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) {
|
||||
if m.reclaimNodeLevelResources(ctx, thresholdToReclaim.Signal, resourceToReclaim) {
|
||||
klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim)
|
||||
return nil
|
||||
}
|
||||
@ -418,17 +420,17 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods
|
||||
}
|
||||
|
||||
// reclaimNodeLevelResources attempts to reclaim node-level resources. Returns true if thresholds were satisfied and no pod eviction is required.
|
||||
func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool {
|
||||
func (m *managerImpl) reclaimNodeLevelResources(ctx context.Context, signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool {
|
||||
nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim]
|
||||
for _, nodeReclaimFunc := range nodeReclaimFuncs {
|
||||
// attempt to reclaim the pressured resource.
|
||||
if err := nodeReclaimFunc(); err != nil {
|
||||
if err := nodeReclaimFunc(ctx); err != nil {
|
||||
klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err)
|
||||
}
|
||||
|
||||
}
|
||||
if len(nodeReclaimFuncs) > 0 {
|
||||
summary, err := m.summaryProvider.Get(true)
|
||||
summary, err := m.summaryProvider.Get(ctx, true)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim")
|
||||
return false
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package eviction
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@ -67,7 +68,7 @@ type mockDiskInfoProvider struct {
|
||||
}
|
||||
|
||||
// HasDedicatedImageFs returns the mocked value
|
||||
func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
|
||||
func (m *mockDiskInfoProvider) HasDedicatedImageFs(_ context.Context) (bool, error) {
|
||||
return m.dedicatedImageFs, nil
|
||||
}
|
||||
|
||||
@ -81,7 +82,7 @@ type mockDiskGC struct {
|
||||
}
|
||||
|
||||
// DeleteUnusedImages returns the mocked values.
|
||||
func (m *mockDiskGC) DeleteUnusedImages() error {
|
||||
func (m *mockDiskGC) DeleteUnusedImages(_ context.Context) error {
|
||||
m.imageGCInvoked = true
|
||||
if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil {
|
||||
m.fakeSummaryProvider.result = m.summaryAfterGC
|
||||
@ -90,7 +91,7 @@ func (m *mockDiskGC) DeleteUnusedImages() error {
|
||||
}
|
||||
|
||||
// DeleteAllUnusedContainers returns the mocked value
|
||||
func (m *mockDiskGC) DeleteAllUnusedContainers() error {
|
||||
func (m *mockDiskGC) DeleteAllUnusedContainers(_ context.Context) error {
|
||||
m.containerGCInvoked = true
|
||||
if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil {
|
||||
m.fakeSummaryProvider.result = m.summaryAfterGC
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package eviction
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
@ -1185,11 +1186,11 @@ type fakeSummaryProvider struct {
|
||||
result *statsapi.Summary
|
||||
}
|
||||
|
||||
func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) {
|
||||
func (f *fakeSummaryProvider) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
|
||||
return f.result, nil
|
||||
}
|
||||
|
||||
func (f *fakeSummaryProvider) GetCPUAndMemoryStats() (*statsapi.Summary, error) {
|
||||
func (f *fakeSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
|
||||
return f.result, nil
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,7 @@ limitations under the License.
|
||||
package eviction
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
time "time"
|
||||
|
||||
@ -129,18 +130,18 @@ func (m *MockDiskInfoProvider) EXPECT() *MockDiskInfoProviderMockRecorder {
|
||||
}
|
||||
|
||||
// HasDedicatedImageFs mocks base method.
|
||||
func (m *MockDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
|
||||
func (m *MockDiskInfoProvider) HasDedicatedImageFs(ctx context.Context) (bool, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "HasDedicatedImageFs")
|
||||
ret := m.ctrl.Call(m, "HasDedicatedImageFs", ctx)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// HasDedicatedImageFs indicates an expected call of HasDedicatedImageFs.
|
||||
func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs() *gomock.Call {
|
||||
func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs), ctx)
|
||||
}
|
||||
|
||||
// MockImageGC is a mock of ImageGC interface.
|
||||
@ -167,17 +168,17 @@ func (m *MockImageGC) EXPECT() *MockImageGCMockRecorder {
|
||||
}
|
||||
|
||||
// DeleteUnusedImages mocks base method.
|
||||
func (m *MockImageGC) DeleteUnusedImages() error {
|
||||
func (m *MockImageGC) DeleteUnusedImages(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteUnusedImages")
|
||||
ret := m.ctrl.Call(m, "DeleteUnusedImages", ctx)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteUnusedImages indicates an expected call of DeleteUnusedImages.
|
||||
func (mr *MockImageGCMockRecorder) DeleteUnusedImages() *gomock.Call {
|
||||
func (mr *MockImageGCMockRecorder) DeleteUnusedImages(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages), ctx)
|
||||
}
|
||||
|
||||
// MockContainerGC is a mock of ContainerGC interface.
|
||||
@ -204,17 +205,17 @@ func (m *MockContainerGC) EXPECT() *MockContainerGCMockRecorder {
|
||||
}
|
||||
|
||||
// DeleteAllUnusedContainers mocks base method.
|
||||
func (m *MockContainerGC) DeleteAllUnusedContainers() error {
|
||||
func (m *MockContainerGC) DeleteAllUnusedContainers(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteAllUnusedContainers")
|
||||
ret := m.ctrl.Call(m, "DeleteAllUnusedContainers", ctx)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteAllUnusedContainers indicates an expected call of DeleteAllUnusedContainers.
|
||||
func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers() *gomock.Call {
|
||||
func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers))
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers), ctx)
|
||||
}
|
||||
|
||||
// MockCgroupNotifier is a mock of CgroupNotifier interface.
|
||||
|
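Because the regenerated mocks above now take a context, existing expectations need a matcher for that extra argument. A short sketch of the updated expectation style; the test itself is hypothetical:

package eviction

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
)

func TestHasDedicatedImageFsExpectation(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	provider := NewMockDiskInfoProvider(mockCtrl)
	// gomock.Any() matches whichever context the code under test passes in.
	provider.EXPECT().HasDedicatedImageFs(gomock.Any()).Return(true, nil)

	hasImageFs, err := provider.HasDedicatedImageFs(context.Background())
	if err != nil || !hasImageFs {
		t.Fatalf("unexpected result: %v %v", hasImageFs, err)
	}
}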
@ -18,6 +18,7 @@ limitations under the License.
|
||||
package eviction
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@ -71,19 +72,19 @@ type Manager interface {
|
||||
// DiskInfoProvider is responsible for informing the manager how disk is configured.
|
||||
type DiskInfoProvider interface {
|
||||
// HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs.
|
||||
HasDedicatedImageFs() (bool, error)
|
||||
HasDedicatedImageFs(ctx context.Context) (bool, error)
|
||||
}
|
||||
|
||||
// ImageGC is responsible for performing garbage collection of unused images.
|
||||
type ImageGC interface {
|
||||
// DeleteUnusedImages deletes unused images.
|
||||
DeleteUnusedImages() error
|
||||
DeleteUnusedImages(ctx context.Context) error
|
||||
}
|
||||
|
||||
// ContainerGC is responsible for performing garbage collection of unused containers.
|
||||
type ContainerGC interface {
|
||||
// DeleteAllUnusedContainers deletes all unused containers, even those that belong to pods that are terminated but not deleted.
|
||||
DeleteAllUnusedContainers() error
|
||||
DeleteAllUnusedContainers(ctx context.Context) error
|
||||
}
|
||||
|
||||
// KillPodFunc kills a pod.
|
||||
@ -131,7 +132,7 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time
|
||||
type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time
|
||||
|
||||
// nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods.
|
||||
type nodeReclaimFunc func() error
|
||||
type nodeReclaimFunc func(ctx context.Context) error
|
||||
|
||||
// nodeReclaimFuncs is an ordered list of nodeReclaimFunc
|
||||
type nodeReclaimFuncs []nodeReclaimFunc
|
||||
|
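Since nodeReclaimFunc now takes a context, the node-level reclaim list can be built directly from the context-aware GC interfaces above. A minimal sketch under assumed names; buildReclaimFuncs is not a real kubelet helper:

package eviction

import "context"

// buildReclaimFuncs shows how the context-aware GC interfaces compose into an
// ordered nodeReclaimFuncs list; each entry receives the caller's context.
func buildReclaimFuncs(imageGC ImageGC, containerGC ContainerGC) nodeReclaimFuncs {
	return nodeReclaimFuncs{
		func(ctx context.Context) error { return containerGC.DeleteAllUnusedContainers(ctx) },
		func(ctx context.Context) error { return imageGC.DeleteUnusedImages(ctx) },
	}
}

With the new signatures, the method values themselves (for example containerGC.DeleteAllUnusedContainers) already satisfy nodeReclaimFunc, so the wrapping closures are optional.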
@ -17,9 +17,10 @@ limitations under the License.
|
||||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
@ -43,9 +44,9 @@ type throttledImageService struct {
|
||||
limiter flowcontrol.RateLimiter
|
||||
}
|
||||
|
||||
func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
|
||||
func (ts throttledImageService) PullImage(ctx context.Context, image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
|
||||
if ts.limiter.TryAccept() {
|
||||
return ts.ImageService.PullImage(image, secrets, podSandboxConfig)
|
||||
return ts.ImageService.PullImage(ctx, image, secrets, podSandboxConfig)
|
||||
}
|
||||
return "", fmt.Errorf("pull QPS exceeded")
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
goerrors "errors"
|
||||
"fmt"
|
||||
"math"
|
||||
@ -24,9 +25,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@ -41,7 +42,7 @@ import (
|
||||
// collection.
|
||||
type StatsProvider interface {
|
||||
// ImageFsStats returns the stats of the image filesystem.
|
||||
ImageFsStats() (*statsapi.FsStats, error)
|
||||
ImageFsStats(ctx context.Context) (*statsapi.FsStats, error)
|
||||
}
|
||||
|
||||
// ImageGCManager is an interface for managing lifecycle of all images.
|
||||
@ -49,7 +50,7 @@ type StatsProvider interface {
|
||||
type ImageGCManager interface {
|
||||
// GarbageCollect applies the garbage collection policy. Errors include being unable to free
|
||||
// enough space as per the garbage collection policy.
|
||||
GarbageCollect() error
|
||||
GarbageCollect(ctx context.Context) error
|
||||
|
||||
// Start async garbage collection of images.
|
||||
Start()
|
||||
@ -57,7 +58,7 @@ type ImageGCManager interface {
|
||||
GetImageList() ([]container.Image, error)
|
||||
|
||||
// Delete all unused images.
|
||||
DeleteUnusedImages() error
|
||||
DeleteUnusedImages(ctx context.Context) error
|
||||
}
|
||||
|
||||
// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in
|
||||
@ -178,13 +179,14 @@ func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, r
|
||||
}
|
||||
|
||||
func (im *realImageGCManager) Start() {
|
||||
ctx := context.Background()
|
||||
go wait.Until(func() {
|
||||
// Initial detection makes the detected time "unknown", i.e., in the past.
|
||||
var ts time.Time
|
||||
if im.initialized {
|
||||
ts = time.Now()
|
||||
}
|
||||
_, err := im.detectImages(ts)
|
||||
_, err := im.detectImages(ctx, ts)
|
||||
if err != nil {
|
||||
klog.InfoS("Failed to monitor images", "err", err)
|
||||
} else {
|
||||
@ -194,7 +196,7 @@ func (im *realImageGCManager) Start() {
|
||||
|
||||
// Start a goroutine periodically updates image cache.
|
||||
go wait.Until(func() {
|
||||
images, err := im.runtime.ListImages()
|
||||
images, err := im.runtime.ListImages(ctx)
|
||||
if err != nil {
|
||||
klog.InfoS("Failed to update image list", "err", err)
|
||||
} else {
|
||||
@ -209,20 +211,20 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
|
||||
return im.imageCache.get(), nil
|
||||
}
|
||||
|
||||
func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) {
|
||||
func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.String, error) {
|
||||
imagesInUse := sets.NewString()
|
||||
|
||||
// Always consider the container runtime pod sandbox image in use
|
||||
imageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage})
|
||||
imageRef, err := im.runtime.GetImageRef(ctx, container.ImageSpec{Image: im.sandboxImage})
|
||||
if err == nil && imageRef != "" {
|
||||
imagesInUse.Insert(imageRef)
|
||||
}
|
||||
|
||||
images, err := im.runtime.ListImages()
|
||||
images, err := im.runtime.ListImages(ctx)
|
||||
if err != nil {
|
||||
return imagesInUse, err
|
||||
}
|
||||
pods, err := im.runtime.GetPods(true)
|
||||
pods, err := im.runtime.GetPods(ctx, true)
|
||||
if err != nil {
|
||||
return imagesInUse, err
|
||||
}
|
||||
@ -276,9 +278,9 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e
|
||||
return imagesInUse, nil
|
||||
}
|
||||
|
||||
func (im *realImageGCManager) GarbageCollect() error {
|
||||
func (im *realImageGCManager) GarbageCollect(ctx context.Context) error {
|
||||
// Get disk usage on disk holding images.
|
||||
fsStats, err := im.statsProvider.ImageFsStats()
|
||||
fsStats, err := im.statsProvider.ImageFsStats(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -308,7 +310,7 @@ func (im *realImageGCManager) GarbageCollect() error {
|
||||
if usagePercent >= im.policy.HighThresholdPercent {
|
||||
amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
|
||||
klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent)
|
||||
freed, err := im.freeSpace(amountToFree, time.Now())
|
||||
freed, err := im.freeSpace(ctx, amountToFree, time.Now())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -323,9 +325,9 @@ func (im *realImageGCManager) GarbageCollect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (im *realImageGCManager) DeleteUnusedImages() error {
|
||||
func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error {
|
||||
klog.InfoS("Attempting to delete unused images")
|
||||
_, err := im.freeSpace(math.MaxInt64, time.Now())
|
||||
_, err := im.freeSpace(ctx, math.MaxInt64, time.Now())
|
||||
return err
|
||||
}
|
||||
|
||||
@ -335,8 +337,8 @@ func (im *realImageGCManager) DeleteUnusedImages() error {
|
||||
// bytes freed is always returned.
|
||||
// Note that error may be nil and the number of bytes freed may be less
|
||||
// than bytesToFree.
|
||||
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {
|
||||
imagesInUse, err := im.detectImages(freeTime)
|
||||
func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time) (int64, error) {
|
||||
imagesInUse, err := im.detectImages(ctx, freeTime)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -385,7 +387,7 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (
|
||||
|
||||
// Remove image. Continue despite errors.
|
||||
klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size)
|
||||
err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})
|
||||
err := im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id})
|
||||
if err != nil {
|
||||
deletionErrors = append(deletionErrors, err)
|
||||
continue
|
||||
|
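Callers of GarbageCollect now supply the context that flows through ImageFsStats, detectImages and RemoveImage above. A hedged sketch of a periodic driver; runImageGC, the interval, and the per-pass timeout are illustrative choices, not the kubelet's:

package images

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// runImageGC is a hypothetical driver loop showing how a caller can bound
// each GarbageCollect pass with its own context derived from a parent.
func runImageGC(parent context.Context, im ImageGCManager, period time.Duration) {
	wait.Until(func() {
		ctx, cancel := context.WithTimeout(parent, period)
		defer cancel()
		if err := im.GarbageCollect(ctx); err != nil {
			klog.ErrorS(err, "Image garbage collection failed")
		}
	}, period, parent.Done())
}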
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@ -91,6 +92,7 @@ func makeContainer(id int) *container.Container {
|
||||
}
|
||||
|
||||
func TestDetectImagesInitialDetect(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -119,7 +121,7 @@ func TestDetectImagesInitialDetect(t *testing.T) {
|
||||
}
|
||||
|
||||
startTime := time.Now().Add(-time.Millisecond)
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 3)
|
||||
@ -138,6 +140,7 @@ func TestDetectImagesInitialDetect(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDetectImagesWithNewImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -156,7 +159,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 2)
|
||||
@ -170,7 +173,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
|
||||
|
||||
detectedTime := zero.Add(time.Second)
|
||||
startTime := time.Now().Add(-time.Millisecond)
|
||||
_, err = manager.detectImages(detectedTime)
|
||||
_, err = manager.detectImages(ctx, detectedTime)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 3)
|
||||
noContainer, ok := manager.getImageRecord(imageID(0))
|
||||
@ -188,6 +191,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -200,13 +204,14 @@ func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err := manager.DeleteUnusedImages()
|
||||
err := manager.DeleteUnusedImages(ctx)
|
||||
assert := assert.New(t)
|
||||
assert.Len(fakeRuntime.ImageList, 1)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDeletePinnedImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
|
||||
@ -223,13 +228,14 @@ func TestDeletePinnedImage(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err := manager.DeleteUnusedImages()
|
||||
err := manager.DeleteUnusedImages(ctx)
|
||||
assert := assert.New(t)
|
||||
assert.Len(fakeRuntime.ImageList, 2)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDoNotDeletePinnedImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
|
||||
@ -246,7 +252,7 @@ func TestDoNotDeletePinnedImage(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
spaceFreed, err := manager.freeSpace(4096, time.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 4096, time.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(1024, spaceFreed)
|
||||
@ -254,6 +260,7 @@ func TestDoNotDeletePinnedImage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDeleteUnPinnedImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
|
||||
@ -270,7 +277,7 @@ func TestDeleteUnPinnedImage(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
spaceFreed, err := manager.freeSpace(2048, time.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(2048, spaceFreed)
|
||||
@ -278,6 +285,7 @@ func TestDeleteUnPinnedImage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllPinnedImages(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
|
||||
@ -295,7 +303,7 @@ func TestAllPinnedImages(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
spaceFreed, err := manager.freeSpace(2048, time.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(0, spaceFreed)
|
||||
@ -303,6 +311,7 @@ func TestAllPinnedImages(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDetectImagesContainerStopped(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -320,7 +329,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 2)
|
||||
@ -329,7 +338,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
|
||||
|
||||
// Simulate container being stopped.
|
||||
fakeRuntime.AllPodList = []*containertest.FakePod{}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 2)
|
||||
container1, ok := manager.getImageRecord(imageID(0))
|
||||
@ -343,6 +352,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDetectImagesWithRemovedImages(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -360,19 +370,20 @@ func TestDetectImagesWithRemovedImages(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 2)
|
||||
|
||||
// Simulate both images being removed.
|
||||
fakeRuntime.ImageList = []container.Image{}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(manager.imageRecordsLen(), 0)
|
||||
}
|
||||
|
||||
func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -390,7 +401,7 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
|
||||
spaceFreed, err := manager.freeSpace(2048, time.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(1024, spaceFreed)
|
||||
@ -398,6 +409,7 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -416,13 +428,14 @@ func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
|
||||
err := manager.DeleteUnusedImages()
|
||||
err := manager.DeleteUnusedImages(ctx)
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.Len(fakeRuntime.ImageList, 1)
|
||||
}
|
||||
|
||||
func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -442,7 +455,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make 1 be more recently used than 0.
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &container.Pod{
|
||||
@ -451,20 +464,20 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.AllPodList = []*containertest.FakePod{
|
||||
{Pod: &container.Pod{
|
||||
Containers: []*container.Container{},
|
||||
}},
|
||||
}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, manager.imageRecordsLen(), 2)
|
||||
|
||||
// We're setting the delete time one minute in the future, so the time the image
|
||||
// was first detected and the delete time are different.
|
||||
spaceFreed, err := manager.freeSpace(1024, time.Now().Add(time.Minute))
|
||||
spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now().Add(time.Minute))
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(1024, spaceFreed)
|
||||
@ -472,6 +485,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
@ -489,20 +503,20 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make 1 more recently detected but used at the same time as 0.
|
||||
_, err := manager.detectImages(zero)
|
||||
_, err := manager.detectImages(ctx, zero)
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.ImageList = []container.Image{
|
||||
makeImage(0, 1024),
|
||||
makeImage(1, 2048),
|
||||
}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.AllPodList = []*containertest.FakePod{}
|
||||
_, err = manager.detectImages(time.Now())
|
||||
_, err = manager.detectImages(ctx, time.Now())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, manager.imageRecordsLen(), 2)
|
||||
|
||||
spaceFreed, err := manager.freeSpace(1024, time.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(2048, spaceFreed)
|
||||
@ -510,6 +524,7 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGarbageCollectBelowLowThreshold(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
policy := ImageGCPolicy{
|
||||
HighThresholdPercent: 90,
|
||||
LowThresholdPercent: 80,
|
||||
@ -520,15 +535,16 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) {
|
||||
manager, _ := newRealImageGCManager(policy, mockStatsProvider)
|
||||
|
||||
// Expect 40% usage.
|
||||
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{
|
||||
mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
|
||||
AvailableBytes: uint64Ptr(600),
|
||||
CapacityBytes: uint64Ptr(1000),
|
||||
}, nil)
|
||||
|
||||
assert.NoError(t, manager.GarbageCollect())
|
||||
assert.NoError(t, manager.GarbageCollect(ctx))
|
||||
}
|
||||
|
||||
func TestGarbageCollectCadvisorFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
policy := ImageGCPolicy{
|
||||
HighThresholdPercent: 90,
|
||||
LowThresholdPercent: 80,
|
||||
@ -538,11 +554,12 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
|
||||
mockStatsProvider := statstest.NewMockProvider(mockCtrl)
|
||||
manager, _ := newRealImageGCManager(policy, mockStatsProvider)
|
||||
|
||||
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{}, fmt.Errorf("error"))
|
||||
assert.NotNil(t, manager.GarbageCollect())
|
||||
mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{}, fmt.Errorf("error"))
|
||||
assert.NotNil(t, manager.GarbageCollect(ctx))
|
||||
}
|
||||
|
||||
func TestGarbageCollectBelowSuccess(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
policy := ImageGCPolicy{
|
||||
HighThresholdPercent: 90,
|
||||
LowThresholdPercent: 80,
|
||||
@ -554,7 +571,7 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
|
||||
manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider)
|
||||
|
||||
// Expect 95% usage and most of it gets freed.
|
||||
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{
|
||||
mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
|
||||
AvailableBytes: uint64Ptr(50),
|
||||
CapacityBytes: uint64Ptr(1000),
|
||||
}, nil)
|
||||
@ -562,10 +579,11 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
|
||||
makeImage(0, 450),
|
||||
}
|
||||
|
||||
assert.NoError(t, manager.GarbageCollect())
|
||||
assert.NoError(t, manager.GarbageCollect(ctx))
|
||||
}
|
||||
|
||||
func TestGarbageCollectNotEnoughFreed(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
policy := ImageGCPolicy{
|
||||
HighThresholdPercent: 90,
|
||||
LowThresholdPercent: 80,
|
||||
@ -576,7 +594,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
|
||||
manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider)
|
||||
|
||||
// Expect 95% usage and little of it gets freed.
|
||||
mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{
|
||||
mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{
|
||||
AvailableBytes: uint64Ptr(50),
|
||||
CapacityBytes: uint64Ptr(1000),
|
||||
}, nil)
|
||||
@ -584,10 +602,11 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
|
||||
makeImage(0, 50),
|
||||
}
|
||||
|
||||
assert.NotNil(t, manager.GarbageCollect())
|
||||
assert.NotNil(t, manager.GarbageCollect(ctx))
|
||||
}
|
||||
|
||||
func TestGarbageCollectImageNotOldEnough(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
policy := ImageGCPolicy{
|
||||
HighThresholdPercent: 90,
|
||||
LowThresholdPercent: 80,
|
||||
@ -620,11 +639,11 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
|
||||
|
||||
fakeClock := testingclock.NewFakeClock(time.Now())
|
||||
t.Log(fakeClock.Now())
|
||||
_, err := manager.detectImages(fakeClock.Now())
|
||||
_, err := manager.detectImages(ctx, fakeClock.Now())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, manager.imageRecordsLen(), 2)
|
||||
// no space is freed since one image is in use, and the other is not old enough
|
||||
spaceFreed, err := manager.freeSpace(1024, fakeClock.Now())
|
||||
spaceFreed, err := manager.freeSpace(ctx, 1024, fakeClock.Now())
|
||||
assert := assert.New(t)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(0, spaceFreed)
|
||||
@ -632,7 +651,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
|
||||
|
||||
// move the clock by the MinAge duration, then 1 image will be garbage collected
|
||||
fakeClock.Step(policy.MinAge)
|
||||
spaceFreed, err = manager.freeSpace(1024, fakeClock.Now())
|
||||
spaceFreed, err = manager.freeSpace(ctx, 1024, fakeClock.Now())
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(1024, spaceFreed)
|
||||
assert.Len(fakeRuntime.ImageList, 1)
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@ -95,7 +96,7 @@ func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix,
|
||||
|
||||
// EnsureImageExists pulls the image for the specified pod and container, and returns
|
||||
// (imageRef, error message, error).
|
||||
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) {
|
||||
func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) {
|
||||
logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Image)
|
||||
ref, err := kubecontainer.GenerateContainerRef(pod, container)
|
||||
if err != nil {
|
||||
@ -122,7 +123,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
|
||||
Image: image,
|
||||
Annotations: podAnnotations,
|
||||
}
|
||||
imageRef, err := m.imageService.GetImageRef(spec)
|
||||
imageRef, err := m.imageService.GetImageRef(ctx, spec)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
|
||||
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
|
||||
@ -151,7 +152,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
|
||||
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", container.Image), klog.Info)
|
||||
startTime := time.Now()
|
||||
pullChan := make(chan pullResult)
|
||||
m.puller.pullImage(spec, pullSecrets, pullChan, podSandboxConfig)
|
||||
m.puller.pullImage(ctx, spec, pullSecrets, pullChan, podSandboxConfig)
|
||||
imagePullResult := <-pullChan
|
||||
if imagePullResult.err != nil {
|
||||
m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning)
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
@ -203,10 +204,11 @@ func TestParallelPuller(t *testing.T) {
|
||||
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv)
|
||||
|
||||
t.Run(c.testName, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for _, expected := range c.expected {
|
||||
fakeRuntime.CalledFunctions = nil
|
||||
fakeClock.Step(time.Second)
|
||||
_, _, err := puller.EnsureImageExists(pod, container, nil, nil)
|
||||
_, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
|
||||
fakeRuntime.AssertCalls(expected.calls)
|
||||
assert.Equal(t, expected.err, err)
|
||||
}
|
||||
@ -230,10 +232,11 @@ func TestSerializedPuller(t *testing.T) {
|
||||
puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv)
|
||||
|
||||
t.Run(c.testName, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for _, expected := range c.expected {
|
||||
fakeRuntime.CalledFunctions = nil
|
||||
fakeClock.Step(time.Second)
|
||||
_, _, err := puller.EnsureImageExists(pod, container, nil, nil)
|
||||
_, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
|
||||
fakeRuntime.AssertCalls(expected.calls)
|
||||
assert.Equal(t, expected.err, err)
|
||||
}
|
||||
@ -290,11 +293,12 @@ func TestPullAndListImageWithPodAnnotations(t *testing.T) {
|
||||
fakeClock.Step(time.Second)
|
||||
|
||||
t.Run(c.testName, func(t *testing.T) {
|
||||
_, _, err := puller.EnsureImageExists(pod, container, nil, nil)
|
||||
ctx := context.Background()
|
||||
_, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil)
|
||||
fakeRuntime.AssertCalls(c.expected[0].calls)
|
||||
assert.Equal(t, c.expected[0].err, err, "tick=%d", 0)
|
||||
|
||||
images, _ := fakeRuntime.ListImages()
|
||||
images, _ := fakeRuntime.ListImages(ctx)
|
||||
assert.Equal(t, 1, len(images), "ListImages() count")
|
||||
|
||||
image := images[0]
|
||||
|
@ -17,9 +17,10 @@ limitations under the License.
package images

import (
"context"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -31,7 +32,7 @@ type pullResult struct {
}

type imagePuller interface {
pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig)
pullImage(context.Context, kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig)
}

var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}
@ -44,9 +45,9 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller
return &parallelImagePuller{imageService}
}

func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
func (pip *parallelImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
go func() {
imageRef, err := pip.imageService.PullImage(spec, pullSecrets, podSandboxConfig)
imageRef, err := pip.imageService.PullImage(ctx, spec, pullSecrets, podSandboxConfig)
pullChan <- pullResult{
imageRef: imageRef,
err: err,
@ -69,14 +70,16 @@ func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
}

type imagePullRequest struct {
ctx context.Context
spec kubecontainer.ImageSpec
pullSecrets []v1.Secret
pullChan chan<- pullResult
podSandboxConfig *runtimeapi.PodSandboxConfig
}

func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
func (sip *serialImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
sip.pullRequests <- &imagePullRequest{
ctx: ctx,
spec: spec,
pullSecrets: pullSecrets,
pullChan: pullChan,
@ -86,7 +89,7 @@ func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecret

func (sip *serialImagePuller) processImagePullRequests() {
for pullRequest := range sip.pullRequests {
imageRef, err := sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig)
imageRef, err := sip.imageService.PullImage(pullRequest.ctx, pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig)
pullRequest.pullChan <- pullResult{
imageRef: imageRef,
err: err,
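The serial puller change above shows the general trick for handing a context across a channel to a worker goroutine: the request struct carries the caller's ctx, and the worker uses that ctx for the downstream call instead of minting its own. A minimal sketch of the same idea with hypothetical names (request, serialWorker, pull), not the kubelet's actual types:

```go
package example

import "context"

// request carries the caller's context along with its payload, mirroring
// the imagePullRequest pattern in the diff. All names are illustrative.
type request struct {
	ctx     context.Context
	image   string
	resultC chan<- error
}

type serialWorker struct {
	requests chan *request
	pull     func(ctx context.Context, image string) error
}

func newSerialWorker(pull func(ctx context.Context, image string) error) *serialWorker {
	w := &serialWorker{requests: make(chan *request, 16), pull: pull}
	go w.loop()
	return w
}

// enqueue hands the caller's ctx to the worker instead of letting the
// worker fall back to context.Background().
func (w *serialWorker) enqueue(ctx context.Context, image string, resultC chan<- error) {
	w.requests <- &request{ctx: ctx, image: image, resultC: resultC}
}

func (w *serialWorker) loop() {
	for req := range w.requests {
		// The downstream call observes the original caller's deadline and cancellation.
		req.resultC <- w.pull(req.ctx, req.image)
	}
}
```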
@ -17,9 +17,10 @@ limitations under the License.
package images

import (
"context"
"errors"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

@ -50,7 +51,7 @@ var (
// Implementations are expected to be thread safe.
type ImageManager interface {
// EnsureImageExists ensures that image specified in `container` exists.
EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error)
EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error)

// TODO(ronl): consolidating image managing and deleting operation in this interface
}
@ -197,7 +197,7 @@ type SyncHandler interface {
HandlePodRemoves(pods []*v1.Pod)
HandlePodReconcile(pods []*v1.Pod)
HandlePodSyncs(pods []*v1.Pod)
HandlePodCleanups() error
HandlePodCleanups(ctx context.Context) error
}

// Option is a functional option type for Kubelet
@ -340,6 +340,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeStatusMaxImages int32,
seccompDefault bool,
) (*Kubelet, error) {
ctx := context.Background()
logger := klog.TODO()

if rootDirectory == "" {
@ -701,7 +702,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil {
if _, err := klet.updatePodCIDR(ctx, kubeCfg.PodCIDR); err != nil {
klog.ErrorS(err, "Pod CIDR update failed")
}

@ -1123,7 +1124,7 @@ type Kubelet struct {
clock clock.WithTicker

// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*v1.Node) error
setNodeStatusFuncs []func(context.Context, *v1.Node) error

lastNodeUnschedulableLock sync.Mutex
// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
@ -1199,23 +1200,23 @@ type Kubelet struct {
}

// ListPodStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStats() ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStats()
func (kl *Kubelet) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStats(ctx)
}

// ListPodCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodCPUAndMemoryStats()
func (kl *Kubelet) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodCPUAndMemoryStats(ctx)
}

// ListPodStatsAndUpdateCPUNanoCoreUsage is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage()
func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
}

// ImageFsStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ImageFsStats() (*statsapi.FsStats, error) {
return kl.StatsProvider.ImageFsStats()
func (kl *Kubelet) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) {
return kl.StatsProvider.ImageFsStats(ctx)
}

// GetCgroupStats is delegated to StatsProvider, which implements stats.Provider interface
@ -1234,8 +1235,8 @@ func (kl *Kubelet) RootFsStats() (*statsapi.FsStats, error) {
}

// GetContainerInfo is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
return kl.StatsProvider.GetContainerInfo(podFullName, uid, containerName, req)
func (kl *Kubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
return kl.StatsProvider.GetContainerInfo(ctx, podFullName, uid, containerName, req)
}

// GetRawContainerInfo is delegated to StatsProvider, which implements stats.Provider interface
@ -1298,7 +1299,8 @@ func (kl *Kubelet) setupDataDirs() error {
func (kl *Kubelet) StartGarbageCollection() {
loggedContainerGCFailure := false
go wait.Until(func() {
if err := kl.containerGC.GarbageCollect(); err != nil {
ctx := context.Background()
if err := kl.containerGC.GarbageCollect(ctx); err != nil {
klog.ErrorS(err, "Container garbage collection failed")
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
loggedContainerGCFailure = true
@ -1321,7 +1323,8 @@ func (kl *Kubelet) StartGarbageCollection() {

prevImageGCFailed := false
go wait.Until(func() {
if err := kl.imageManager.GarbageCollect(); err != nil {
ctx := context.Background()
if err := kl.imageManager.GarbageCollect(ctx); err != nil {
if prevImageGCFailed {
klog.ErrorS(err, "Image garbage collection failed multiple times in a row")
// Only create an event for repeated failures
@ -1433,6 +1436,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {

// Run starts the kubelet reacting to config updates
func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
ctx := context.Background()
if kl.logServer == nil {
kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/")))
}
@ -1481,7 +1485,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {

// Start the pod lifecycle event generator.
kl.pleg.Start()
kl.syncLoop(updates, kl)
kl.syncLoop(ctx, updates, kl)
}

// syncPod is the transaction script for the sync of a single pod (setting up)
@ -1532,7 +1536,10 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// This operation writes all events that are dispatched in order to provide
// the most accurate information possible about an error situation to aid debugging.
// Callers should not write an event if this operation returns an error.
func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) {
func (kl *Kubelet) syncPod(_ context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) {
// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
// Currently, using that context causes test failures.
ctx := context.TODO()
klog.V(4).InfoS("syncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer func() {
klog.V(4).InfoS("syncPod exit", "pod", klog.KObj(pod), "podUID", pod.UID, "isTerminal", isTerminal)
@ -1619,7 +1626,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
klog.V(2).InfoS("Pod is not runnable and must have running containers stopped", "pod", klog.KObj(pod), "podUID", pod.UID, "message", runnable.Message)
var syncErr error
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, nil); err != nil {
if err := kl.killPod(ctx, pod, p, nil); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
syncErr = fmt.Errorf("error killing pod: %v", err)
utilruntime.HandleError(syncErr)
@ -1671,7 +1678,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
podKilled := false
if !pcm.Exists(pod) && !firstSync {
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, nil); err == nil {
if err := kl.killPod(ctx, pod, p, nil); err == nil {
podKilled = true
} else {
klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus)
@ -1753,7 +1760,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
kl.probeManager.AddPod(pod)

// Call the container runtime's SyncPod callback
result := kl.containerRuntime.SyncPod(pod, podStatus, pullSecrets, kl.backOff)
result := kl.containerRuntime.SyncPod(ctx, pod, podStatus, pullSecrets, kl.backOff)
kl.reasonCache.Update(pod.UID, result)
if err := result.Error(); err != nil {
// Do not return error if the only failures were pods in backoff
@ -1774,7 +1781,10 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType
// syncTerminatingPod is expected to terminate all running containers in a pod. Once this method
// returns without error, the pod's local state can be safely cleaned up. If runningPod is passed,
// we perform no status updates.
func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, runningPod *kubecontainer.Pod, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
func (kl *Kubelet) syncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, runningPod *kubecontainer.Pod, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
// Currently, using that context causes test failures.
ctx := context.Background()
klog.V(4).InfoS("syncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer klog.V(4).InfoS("syncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)

@ -1788,7 +1798,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
} else {
klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil)
}
if err := kl.killPod(pod, *runningPod, gracePeriod); err != nil {
if err := kl.killPod(ctx, pod, *runningPod, gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err)
@ -1813,7 +1823,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
kl.probeManager.StopLivenessAndStartup(pod)

p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(pod, p, gracePeriod); err != nil {
if err := kl.killPod(ctx, pod, p, gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err)
@ -1831,7 +1841,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
// catch race conditions introduced by callers updating pod status out of order.
// TODO: have KillPod return the terminal status of stopped containers and write that into the
// cache immediately
podStatus, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil {
klog.ErrorS(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID)
return err
@ -2019,7 +2029,7 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
// any new change seen, will run a sync against desired state and running state. If
// no changes are seen to the configuration, will synchronize the last known desired
// state every sync-frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
klog.InfoS("Starting kubelet main sync loop")
// The syncTicker wakes up kubelet to checks if there are any pod workers
// that need to be sync'd. A one-second period is sufficient because the
@ -2054,7 +2064,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
duration = base

kl.syncLoopMonitor.Store(kl.clock.Now())
if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
if !kl.syncLoopIteration(ctx, updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
break
}
kl.syncLoopMonitor.Store(kl.clock.Now())
@ -2093,7 +2103,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
// - housekeepingCh: trigger cleanup of pods
// - health manager: sync pods that have failed or in which one or more
// containers have failed health checks
func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
select {
case u, open := <-configCh:
@ -2189,7 +2199,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
} else {
start := time.Now()
klog.V(4).InfoS("SyncLoop (housekeeping)")
if err := handler.HandlePodCleanups(); err != nil {
if err := handler.HandlePodCleanups(ctx); err != nil {
klog.ErrorS(err, "Failed cleaning pods")
}
duration := time.Since(start)
@ -2363,8 +2373,9 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time {
func (kl *Kubelet) updateRuntimeUp() {
kl.updateRuntimeMux.Lock()
defer kl.updateRuntimeMux.Unlock()
ctx := context.Background()

s, err := kl.containerRuntime.Status()
s, err := kl.containerRuntime.Status(ctx)
if err != nil {
klog.ErrorS(err, "Container runtime sanity check failed")
return
@ -2448,6 +2459,7 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str
// Function is executed only during Kubelet start which improves latency to ready node by updating
// pod CIDR, runtime status and node statuses ASAP.
func (kl *Kubelet) fastStatusUpdateOnce() {
ctx := context.Background()
for {
time.Sleep(100 * time.Millisecond)
node, err := kl.GetNode()
@ -2457,7 +2469,7 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
}
if len(node.Spec.PodCIDRs) != 0 {
podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
if _, err := kl.updatePodCIDR(podCIDRs); err != nil {
if _, err := kl.updatePodCIDR(ctx, podCIDRs); err != nil {
klog.ErrorS(err, "Pod CIDR update failed", "CIDR", podCIDRs)
continue
}
@ -2474,12 +2486,13 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
// engine will be asked to checkpoint the given container into the kubelet's default
// checkpoint directory.
func (kl *Kubelet) CheckpointContainer(
ctx context.Context,
podUID types.UID,
podFullName,
containerName string,
options *runtimeapi.CheckpointContainerRequest,
) error {
container, err := kl.findContainer(podFullName, podUID, containerName)
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return err
}
@ -2499,7 +2512,7 @@ func (kl *Kubelet) CheckpointContainer(

options.ContainerId = string(container.ID.ID)

if err := kl.containerRuntime.CheckpointContainer(options); err != nil {
if err := kl.containerRuntime.CheckpointContainer(ctx, options); err != nil {
return err
}

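Several of the kubelet.go hunks above share one shape: a long-lived entry point (Run, StartGarbageCollection, updateRuntimeUp, fastStatusUpdateOnce) creates a root context.Background() once and threads it down into the loop and its handlers, rather than each callee minting its own context or using context.TODO(). A stripped-down sketch of that shape with hypothetical names (handler, loop, HandleCleanups), not the kubelet's actual interfaces:

```go
package example

import (
	"context"
	"fmt"
	"time"
)

type handler interface {
	HandleCleanups(ctx context.Context) error
}

type loop struct{ h handler }

// Run creates the root context once and passes it down, mirroring how
// Kubelet.Run now feeds syncLoop, syncLoopIteration, and HandlePodCleanups.
func (l *loop) Run(stopCh <-chan struct{}) {
	ctx := context.Background()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			if err := l.h.HandleCleanups(ctx); err != nil {
				fmt.Println("cleanup failed:", err)
			}
		}
	}
}
```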
@ -191,8 +191,8 @@ func (kl *Kubelet) GetPods() []*v1.Pod {
// container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods()
func (kl *Kubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods(ctx)
if err != nil {
return nil, err
}
@ -17,9 +17,10 @@ limitations under the License.
package kubelet

import (
"context"
"fmt"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
)
@ -40,7 +41,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {

// updatePodCIDR updates the pod CIDR in the runtime state if it is different
// from the current CIDR. Return true if pod CIDR is actually changed.
func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {
func (kl *Kubelet) updatePodCIDR(ctx context.Context, cidr string) (bool, error) {
kl.updatePodCIDRMux.Lock()
defer kl.updatePodCIDRMux.Unlock()

@ -52,7 +53,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {

// kubelet -> generic runtime -> runtime shim -> network plugin
// docker/non-cri implementations have a passthrough UpdatePodCIDR
if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
if err := kl.getRuntime().UpdatePodCIDR(ctx, cidr); err != nil {
// If updatePodCIDR would fail, theoretically pod CIDR could not change.
// But it is better to be on the safe side to still return true here.
return true, fmt.Errorf("failed to update pod CIDR: %v", err)
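The updatePodCIDR hunk shows the most common migration in this commit: a method that used to invoke a context-free runtime API now accepts the caller's ctx and simply forwards it, so the underlying CRI call can share the caller's deadline and cancellation. A minimal sketch under stated assumptions; runtime, node, and updateCIDR are illustrative names rather than kubelet types:

```go
package example

import (
	"context"
	"fmt"
	"sync"
)

// runtime stands in for a CRI-backed runtime interface; illustrative only.
type runtime interface {
	UpdatePodCIDR(ctx context.Context, cidr string) error
}

type node struct {
	mu      sync.Mutex
	cidr    string
	runtime runtime
}

// updateCIDR takes the caller's ctx and forwards it to the runtime call,
// instead of making the runtime call uncancellable.
func (n *node) updateCIDR(ctx context.Context, cidr string) (bool, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.cidr == cidr {
		return false, nil
	}
	if err := n.runtime.UpdatePodCIDR(ctx, cidr); err != nil {
		return true, fmt.Errorf("failed to update pod CIDR: %w", err)
	}
	n.cidr = cidr
	return true, nil
}
```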
@ -424,7 +424,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
}
}

kl.setNodeStatus(node)
kl.setNodeStatus(ctx, node)

return node, nil
}
@ -435,6 +435,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
func (kl *Kubelet) syncNodeStatus() {
kl.syncNodeStatusMux.Lock()
defer kl.syncNodeStatusMux.Unlock()
ctx := context.Background()

if kl.kubeClient == nil || kl.heartbeatClient == nil {
return
@ -443,17 +444,17 @@ func (kl *Kubelet) syncNodeStatus() {
// This will exit immediately if it doesn't need to do anything.
kl.registerWithAPIServer()
}
if err := kl.updateNodeStatus(); err != nil {
if err := kl.updateNodeStatus(ctx); err != nil {
klog.ErrorS(err, "Unable to update node status")
}
}

// updateNodeStatus updates node status to master with retries if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) updateNodeStatus() error {
func (kl *Kubelet) updateNodeStatus(ctx context.Context) error {
klog.V(5).InfoS("Updating node status")
for i := 0; i < nodeStatusUpdateRetry; i++ {
if err := kl.tryUpdateNodeStatus(i); err != nil {
if err := kl.tryUpdateNodeStatus(ctx, i); err != nil {
if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
kl.onRepeatedHeartbeatFailure()
}
@ -467,7 +468,7 @@ func (kl *Kubelet) updateNodeStatus() error {

// tryUpdateNodeStatus tries to update node status to master if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error {
// In large clusters, GET and PUT operations on Node objects coming
// from here are the majority of load on apiserver and etcd.
// To reduce the load on etcd, we are serving GET operations from
@ -478,7 +479,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
if tryNumber == 0 {
util.FromApiserverCache(&opts)
}
node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts)
node, err := kl.heartbeatClient.CoreV1().Nodes().Get(ctx, string(kl.nodeName), opts)
if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
}
@ -494,7 +495,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
// node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is
// actually changed.
podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
if podCIDRChanged, err = kl.updatePodCIDR(podCIDRs); err != nil {
if podCIDRChanged, err = kl.updatePodCIDR(ctx, podCIDRs); err != nil {
klog.ErrorS(err, "Error updating pod CIDR")
}
}
@ -518,7 +519,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
areRequiredLabelsNotPresent = true
}

kl.setNodeStatus(node)
kl.setNodeStatus(ctx, node)

now := kl.clock.Now()
if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
@ -570,7 +571,7 @@ func (kl *Kubelet) recordEvent(eventType, event, message string) {
}

// record if node schedulable change.
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node) error {
kl.lastNodeUnschedulableLock.Lock()
defer kl.lastNodeUnschedulableLock.Unlock()
if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
@ -588,10 +589,10 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(node *v1.Node) {
func (kl *Kubelet) setNodeStatus(ctx context.Context, node *v1.Node) {
for i, f := range kl.setNodeStatusFuncs {
klog.V(5).InfoS("Setting node status condition code", "position", i, "node", klog.KObj(node))
if err := f(node); err != nil {
if err := f(ctx, node); err != nil {
klog.ErrorS(err, "Failed to set some node status fields", "node", klog.KObj(node))
}
}
@ -610,7 +611,7 @@ func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {

// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) error {
// if cloud is not nil, we expect the cloud resource sync manager to exist
var nodeAddressesFunc func() ([]v1.NodeAddress, error)
if kl.cloud != nil {
@ -620,7 +621,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
if kl.appArmorValidator != nil {
validateHostFunc = kl.appArmorValidator.ValidateHost
}
var setters []func(n *v1.Node) error
var setters []func(ctx context.Context, n *v1.Node) error
setters = append(setters,
nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
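The node-status hunks change a stored function type, so every setter now receives the context that tryUpdateNodeStatus was invoked with. A minimal sketch of that migration with hypothetical names (nodeStatus, setter, defaultSetters), not the real nodestatus package:

```go
package example

import "context"

type nodeStatus struct{ addresses []string }

// setter is the context-aware function type, the analogue of
// []func(context.Context, *v1.Node) error in the diff above.
type setter func(ctx context.Context, s *nodeStatus) error

func defaultSetters(lookup func(ctx context.Context) ([]string, error)) []setter {
	return []setter{
		func(ctx context.Context, s *nodeStatus) error {
			addrs, err := lookup(ctx) // any lookup now honors the caller's ctx
			if err != nil {
				return err
			}
			s.addresses = addrs
			return nil
		},
	}
}

// applySetters mirrors setNodeStatus: it passes one ctx to every setter.
func applySetters(ctx context.Context, s *nodeStatus, setters []setter) error {
	for _, f := range setters {
		if err := f(ctx, s); err != nil {
			return err
		}
	}
	return nil
}
```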
@ -177,6 +177,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {

for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
// generate one more in inputImageList than we configure the Kubelet to report,
// or 5 images if unlimited
numTestImages := int(tc.nodeStatusMaxImages) + 1
@ -290,7 +291,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
}

kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
@ -315,6 +316,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
}

func TestUpdateExistingNodeStatus(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
@ -478,7 +480,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
}

kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

actions := kubeClient.Actions()
assert.Len(t, actions, 2)
@ -506,6 +508,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
}

func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
@ -559,7 +562,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
}

// should return an error, but not hang
assert.Error(t, kubelet.updateNodeStatus())
assert.Error(t, kubelet.updateNodeStatus(ctx))

// should have attempted multiple times
if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
@ -572,6 +575,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
}

func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
@ -681,13 +685,13 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {

checkNodeStatus := func(status v1.ConditionStatus, reason string) {
kubeClient.ClearActions()
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, actions[1].GetSubresource(), "status")

updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{})
updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
require.NoError(t, err, "can't apply node status patch")

for i, cond := range updatedNode.Status.Conditions {
@ -781,17 +785,19 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
}

func TestUpdateNodeStatusError(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
assert.Error(t, kubelet.updateNodeStatus())
assert.Error(t, kubelet.updateNodeStatus(ctx))
assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
}

func TestUpdateNodeStatusWithLease(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
clock := testKubelet.fakeClock
@ -911,7 +917,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
// Update node status when node status is created.
// Report node status.
kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

actions := kubeClient.Actions()
assert.Len(t, actions, 2)
@ -934,7 +940,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
// Update node status again when nothing is changed (except heartbeat time).
// Report node status if it has exceeded the duration of nodeStatusReportFrequency.
clock.Step(time.Minute)
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

// 2 more action (There were 2 actions before).
actions = kubeClient.Actions()
@ -959,7 +965,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
// Update node status again when nothing is changed (except heartbeat time).
// Do not report node status if it is within the duration of nodeStatusReportFrequency.
clock.Step(10 * time.Second)
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

// Only 1 more action (There were 4 actions before).
actions = kubeClient.Actions()
@ -977,7 +983,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
newMachineInfo := oldMachineInfo.Clone()
newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity)
kubelet.setCachedMachineInfo(newMachineInfo)
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

// 2 more action (There were 5 actions before).
actions = kubeClient.Actions()
@ -1009,7 +1015,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
updatedNode.Spec.PodCIDR = podCIDRs[0]
updatedNode.Spec.PodCIDRs = podCIDRs
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))
assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
// 2 more action (There were 7 actions before).
actions = kubeClient.Actions()
@ -1022,7 +1028,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
clock.Step(10 * time.Second)
assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")

assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// Only 1 more action (There were 9 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 10)
@ -1078,6 +1084,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {

for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
// Setup
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
@ -1094,7 +1101,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
kubelet.volumeManager = fakeVolumeManager

// Only test VolumesInUse setter
kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
kubelet.volumeManager.GetVolumesInUse),
}
@ -1103,7 +1110,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain

// Execute
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

// Validate
actions := kubeClient.Actions()
@ -1345,6 +1352,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
}

func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
ctx := context.Background()
const nodeStatusMaxImages = 5

// generate one more in inputImageList than we configure the Kubelet to report
@ -1403,7 +1411,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
}

kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
@ -2817,6 +2825,7 @@ func TestUpdateNodeAddresses(t *testing.T) {

for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
ctx := context.Background()
oldNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
@ -2832,15 +2841,15 @@ func TestUpdateNodeAddresses(t *testing.T) {
},
}

_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{})
_, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{})
assert.NoError(t, err)
kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
func(node *v1.Node) error {
kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
func(_ context.Context, node *v1.Node) error {
node.Status.Addresses = expectedNode.Status.Addresses
return nil
},
}
assert.NoError(t, kubelet.updateNodeStatus())
assert.NoError(t, kubelet.updateNodeStatus(ctx))

actions := kubeClient.Actions()
lastAction := actions[len(actions)-1]
@ -466,7 +466,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {

// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
opts, err := kl.containerManager.GetResources(pod, container)
if err != nil {
return nil, nil, err
@ -519,7 +519,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai

// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
if kl.experimentalHostUserNamespaceDefaulting {
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(ctx, pod)
}

return opts, cleanupAction, nil
@ -854,9 +854,9 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
// killPod instructs the container runtime to kill the pod. This method requires that
// the pod status contains the result of the last syncPod, otherwise it may fail to
// terminate newly created containers and sandboxes.
func (kl *Kubelet) killPod(pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
func (kl *Kubelet) killPod(ctx context.Context, pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
// Call the container runtime KillPod method which stops all known running containers of the pod
if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
if err := kl.containerRuntime.KillPod(ctx, pod, p, gracePeriodOverride); err != nil {
return err
}
if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
@ -1054,7 +1054,7 @@ func (kl *Kubelet) deleteOrphanedMirrorPods() {
// is executing which means no new pods can appear.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
func (kl *Kubelet) HandlePodCleanups() error {
func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error {
// The kubelet lacks checkpointing, so we need to introspect the set of pods
// in the cgroup tree prior to inspecting the set of pods in our pod manager.
// this ensures our view of the cgroup tree does not mistakenly observe pods
@ -1118,7 +1118,7 @@ func (kl *Kubelet) HandlePodCleanups() error {

// Terminate any pods that are observed in the runtime but not
// present in the list of known running pods from config.
runningRuntimePods, err := kl.runtimeCache.GetPods()
runningRuntimePods, err := kl.runtimeCache.GetPods(ctx)
if err != nil {
klog.ErrorS(err, "Error listing containers")
return err
@ -1156,7 +1156,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
// in the cache. We need to bypass the cache to get the latest set of
// running pods to clean up the volumes.
// TODO: Evaluate the performance impact of bypassing the runtime cache.
runningRuntimePods, err = kl.containerRuntime.GetPods(false)
runningRuntimePods, err = kl.containerRuntime.GetPods(ctx, false)
if err != nil {
klog.ErrorS(err, "Error listing containers")
return err
@ -1876,8 +1876,8 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {

// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(false)
func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(ctx, false)
if err != nil {
return nil, err
}
@ -1889,8 +1889,8 @@ func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, container
}

// RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
@ -1898,24 +1898,24 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe
return nil, fmt.Errorf("container not found (%q)", containerName)
}
// TODO(tallclair): Pass a proper timeout value.
return kl.runner.RunInContainer(container.ID, cmd, 0)
return kl.runner.RunInContainer(ctx, container.ID, cmd, 0)
}

// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
func (kl *Kubelet) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
return kl.streamingRuntime.GetExec(ctx, container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
}

// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
@ -1936,12 +1936,12 @@ func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName
}
tty := containerSpec.TTY

return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
return kl.streamingRuntime.GetAttach(ctx, container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
}

// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
pods, err := kl.containerRuntime.GetPods(false)
func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
pods, err := kl.containerRuntime.GetPods(ctx, false)
if err != nil {
return nil, err
}
@ -1954,7 +1954,7 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID
return nil, fmt.Errorf("pod not found (%q)", podFullName)
}

return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports)
return kl.streamingRuntime.GetPortForward(ctx, podName, podNamespace, podUID, portForwardOpts.Ports)
}

// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
@ -1995,9 +1995,9 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupP
// NOTE: when if a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
func (kl *Kubelet) enableHostUserNamespace(ctx context.Context, pod *v1.Pod) bool {
if kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(ctx, pod) {
return true
}
return false
@ -2037,7 +2037,7 @@ func hasHostNamespace(pod *v1.Pod) bool {
}

// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
func (kl *Kubelet) hasHostMountPVC(ctx context.Context, pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
pvcName := ""
switch {
@ -2048,13 +2048,13 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
default:
continue
}
pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, pvcName), "err", err)
continue
}
if pvc != nil {
referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err)
continue
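In the kubelet_pods.go hunks the recurring change is simply swapping context.TODO() for the ctx that the caller already holds when talking to the API server. A hedged sketch of that shape using the standard client-go Get signature; the helper name pvcUsesHostPath is hypothetical and is not a kubelet function:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pvcUsesHostPath shows the shape of the change: the caller's ctx, not
// context.TODO(), flows into the client-go calls so they are cancellable.
func pvcUsesHostPath(ctx context.Context, client kubernetes.Interface, namespace, pvcName string) (bool, error) {
	pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return pv.Spec.HostPath != nil, nil
}
```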
@ -17,6 +17,7 @@ limitations under the License.
package kubelet

import (
"context"
"errors"
"fmt"
"net"
@ -298,6 +299,7 @@ fd00::6 podFoo.domainFoo podFoo
}

func TestRunInContainerNoSuchPod(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
@ -308,6 +310,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
podNamespace := "nsFoo"
containerName := "containerFoo"
output, err := kubelet.RunInContainer(
ctx,
kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerName,
@ -317,6 +320,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
}

func TestRunInContainer(t *testing.T) {
ctx := context.Background()
for _, testError := range []error{nil, errors.New("bar")} {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
@ -342,7 +346,7 @@ func TestRunInContainer(t *testing.T) {
}},
}
cmd := []string{"ls"}
actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd)
assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
@ -2962,6 +2966,7 @@ func TestGetExec(t *testing.T) {

for _, tc := range testcases {
t.Run(tc.description, func(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
@ -2983,7 +2988,7 @@ func TestGetExec(t *testing.T) {
kubelet.containerRuntime = fakeRuntime
kubelet.streamingRuntime = fakeRuntime

redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
if tc.expectError {
assert.Error(t, err, description)
} else {
@ -3016,6 +3021,7 @@ func TestGetPortForward(t *testing.T) {
}}

for _, tc := range testcases {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
@ -3037,7 +3043,7 @@ func TestGetPortForward(t *testing.T) {
kubelet.containerRuntime = fakeRuntime
kubelet.streamingRuntime = fakeRuntime

redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{})
redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{})
if tc.expectError {
assert.Error(t, err, description)
} else {
@ -3086,6 +3092,7 @@ func TestHasHostMountPVC(t *testing.T) {
}

run := func(t *testing.T, v testcase) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
pod := &v1.Pod{
@ -3134,7 +3141,7 @@ func TestHasHostMountPVC(t *testing.T) {
return true, volumeToReturn, v.pvError
})

actual := testKubelet.kubelet.hasHostMountPVC(pod)
actual := testKubelet.kubelet.hasHostMountPVC(ctx, pod)
if actual != v.expected {
t.Errorf("expected %t but got %t", v.expected, actual)
}
@ -110,7 +110,7 @@ type fakeImageGCManager struct {
|
||||
}
|
||||
|
||||
func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
|
||||
return f.fakeImageService.ListImages()
|
||||
return f.fakeImageService.ListImages(context.Background())
|
||||
}
|
||||
|
||||
type TestKubelet struct {
|
||||
@ -410,6 +410,7 @@ func newTestPods(count int) []*v1.Pod {
|
||||
}
|
||||
|
||||
func TestSyncLoopAbort(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
@ -422,11 +423,11 @@ func TestSyncLoopAbort(t *testing.T) {
|
||||
close(ch)
|
||||
|
||||
// sanity check (also prevent this test from hanging in the next step)
|
||||
ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
|
||||
ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")

// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
kubelet.syncLoop(ch, kubelet)
kubelet.syncLoop(ctx, ch, kubelet)
}

func TestSyncPodsStartPod(t *testing.T) {
@ -447,6 +448,7 @@ func TestSyncPodsStartPod(t *testing.T) {
}

func TestHandlePodCleanupsPerQOS(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()

@ -474,7 +476,7 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
// within a goroutine so a two second delay should be enough time to
// mark the pod as killed (within this test case).

kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups(ctx)

// assert that unwanted pods were killed
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@ -485,9 +487,9 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
// simulate Runtime.KillPod
fakeRuntime.PodList = nil

kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups(ctx)
kubelet.HandlePodCleanups(ctx)
kubelet.HandlePodCleanups(ctx)

destroyCount := 0
err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
@ -644,6 +646,7 @@ func TestDispatchWorkOfActivePod(t *testing.T) {
}

func TestHandlePodCleanups(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()

@ -662,7 +665,7 @@ func TestHandlePodCleanups(t *testing.T) {
}
kubelet := testKubelet.kubelet

kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups(ctx)

// assert that unwanted pods were queued to kill
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@ -1133,6 +1136,7 @@ func TestHandlePluginResources(t *testing.T) {

// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()

@ -1149,7 +1153,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
}
// Sync with empty pods so that the entry in status map will be removed.
kl.podManager.SetPods([]*v1.Pod{})
kl.HandlePodCleanups()
kl.HandlePodCleanups(ctx)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
t.Fatalf("expected to not have status cached for pod2")
}
@ -1379,6 +1383,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
}

func TestDeleteOrphanedMirrorPods(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()

@ -1428,7 +1433,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
}

// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups()
kl.HandlePodCleanups(ctx)
assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
for i, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
@ -1447,6 +1452,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
}

func TestGetContainerInfoForMirrorPods(t *testing.T) {
ctx := context.Background()
// pods contain one static and one mirror pod with the same name but
// different UIDs.
pods := []*v1.Pod{
@ -1505,7 +1511,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {

kubelet.podManager.SetPods(pods)
// Use the mirror pod UID to retrieve the stats.
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
stats, err := kubelet.GetContainerInfo(ctx, "qux_ns", "5678", "foo", cadvisorReq)
assert.NoError(t, err)
require.NotNil(t, stats)
}
@ -1666,11 +1672,13 @@ func TestCheckpointContainer(t *testing.T) {

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
options := &runtimeapi.CheckpointContainerRequest{}
if test.checkpointLocation != "" {
options.Location = test.checkpointLocation
}
status := kubelet.CheckpointContainer(
ctx,
fakePod.Pod.ID,
fmt.Sprintf(
"%s_%s",
@ -1818,6 +1826,7 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
}

func TestDeletePodDirsForDeletedPods(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
@ -1835,18 +1844,19 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {

// Pod 1 has been deleted and no longer exists.
kl.podManager.SetPods([]*v1.Pod{pods[0]})
kl.HandlePodCleanups()
kl.HandlePodCleanups(ctx)
assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
}

func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
ctx := context.Background()
t.Helper()
kl := testKubelet.kubelet

kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups()
kl.HandlePodCleanups(ctx)
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
assert.Equal(t, shouldExist, exist, "directory of pod %d", i)

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"net/http"
"time"

@ -89,6 +90,7 @@ func (f *fakePodPullingTimeRecorder) RecordImageStartedPulling(podUID types.UID)
func (f *fakePodPullingTimeRecorder) RecordImageFinishedPulling(podUID types.UID) {}

func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
ctx := context.Background()
recorder := &record.FakeRecorder{}
logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2)
if err != nil {
@ -113,7 +115,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
memoryThrottlingFactor: 0.8,
}

typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil {
return nil, err
}

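As an aside, the point of threading a context into calls like Version above is that callers can now bound or cancel individual CRI requests. The following standalone sketch is not part of this commit; the runtimeVersioner interface and fakeRuntime type are hypothetical stand-ins used only to illustrate the per-call timeout pattern:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runtimeVersioner is a hypothetical stand-in for the single CRI method used here.
type runtimeVersioner interface {
	Version(ctx context.Context, apiVersion string) (string, error)
}

// versionWithTimeout bounds the call with a per-call deadline; cancelling the
// parent context cancels the in-flight request as well.
func versionWithTimeout(ctx context.Context, rs runtimeVersioner, apiVersion string, timeout time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return rs.Version(ctx, apiVersion)
}

// fakeRuntime makes the sketch runnable without a real container runtime.
type fakeRuntime struct{}

func (fakeRuntime) Version(ctx context.Context, apiVersion string) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond): // pretend the runtime answers quickly
		return "0.1.0", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	v, err := versionWithTimeout(context.Background(), fakeRuntime{}, "0.1.0", 2*time.Second)
	fmt.Println(v, err)
}
```

With this shape, a kubelet-style caller no longer depends on a client-wide timeout; shutdown or per-operation deadlines propagate into each call.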
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"path/filepath"
"strconv"
@ -119,8 +120,8 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSand

// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) {
resp, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false)
func (m *kubeGenericRuntimeManager) getImageUser(ctx context.Context, image string) (*int64, string, error) {
resp, err := m.imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: image}, false)
if err != nil {
return nil, "", err
}

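The getImageUser comment above promises that only one of uid or username is returned. A minimal sketch of that contract, assuming a trimmed-down imageStatus type rather than the real CRI ImageStatusResponse:

```go
package main

import "fmt"

// imageStatus is a hypothetical, trimmed-down stand-in for a CRI image status:
// a numeric UID may be absent, and a textual username may be empty.
type imageStatus struct {
	uid      *int64
	username string
}

// imageUser mirrors the "only one of uid or username is set" contract: a
// declared numeric UID wins and the username is dropped.
func imageUser(s imageStatus) (*int64, string) {
	if s.uid != nil {
		return s.uid, ""
	}
	if s.username != "" {
		return nil, s.username
	}
	// Neither set: fall back to uid 0 as a default.
	root := int64(0)
	return &root, ""
}

func main() {
	uid := int64(1000)
	u, name := imageUser(imageStatus{uid: &uid, username: "app"})
	fmt.Println(*u, name) // 1000 ""
}
```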
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
@ -31,7 +32,7 @@ import (

type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)

func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
return f(uid, name, namespace)
}

@ -217,10 +218,11 @@ func TestGetImageUser(t *testing.T) {

i.SetFakeImages([]string{"test-image-ref1", "test-image-ref2", "test-image-ref3"})
for j, test := range tests {
ctx := context.Background()
i.Images[test.originalImage.name].Username = test.originalImage.username
i.Images[test.originalImage.name].Uid = test.originalImage.uid

uid, username, err := m.getImageUser(test.originalImage.name)
uid, username, err := m.getImageUser(ctx, test.originalImage.name)
assert.NoError(t, err, "TestCase[%d]", j)

if test.expectedImageUserValues.uid == (*int64)(nil) {

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"time"

internalapi "k8s.io/cri-api/pkg/apis"
@ -59,130 +60,130 @@ func recordError(operation string, err error) {
}
}

func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
func (in instrumentedRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) {
const operation = "version"
defer recordOperation(operation, time.Now())

out, err := in.service.Version(apiVersion)
out, err := in.service.Version(ctx, apiVersion)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
func (in instrumentedRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
const operation = "status"
defer recordOperation(operation, time.Now())

out, err := in.service.Status(verbose)
out, err := in.service.Status(ctx, verbose)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (in instrumentedRuntimeService) CreateContainer(ctx context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "create_container"
defer recordOperation(operation, time.Now())

out, err := in.service.CreateContainer(podSandboxID, config, sandboxConfig)
out, err := in.service.CreateContainer(ctx, podSandboxID, config, sandboxConfig)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) StartContainer(containerID string) error {
func (in instrumentedRuntimeService) StartContainer(ctx context.Context, containerID string) error {
const operation = "start_container"
defer recordOperation(operation, time.Now())

err := in.service.StartContainer(containerID)
err := in.service.StartContainer(ctx, containerID)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) StopContainer(containerID string, timeout int64) error {
func (in instrumentedRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) error {
const operation = "stop_container"
defer recordOperation(operation, time.Now())

err := in.service.StopContainer(containerID, timeout)
err := in.service.StopContainer(ctx, containerID, timeout)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) RemoveContainer(containerID string) error {
func (in instrumentedRuntimeService) RemoveContainer(ctx context.Context, containerID string) error {
const operation = "remove_container"
defer recordOperation(operation, time.Now())

err := in.service.RemoveContainer(containerID)
err := in.service.RemoveContainer(ctx, containerID)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
func (in instrumentedRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
const operation = "list_containers"
defer recordOperation(operation, time.Now())

out, err := in.service.ListContainers(filter)
out, err := in.service.ListContainers(ctx, filter)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
func (in instrumentedRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
const operation = "container_status"
defer recordOperation(operation, time.Now())

out, err := in.service.ContainerStatus(containerID, verbose)
out, err := in.service.ContainerStatus(ctx, containerID, verbose)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) error {
func (in instrumentedRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) error {
const operation = "update_container"
defer recordOperation(operation, time.Now())

err := in.service.UpdateContainerResources(containerID, resources)
err := in.service.UpdateContainerResources(ctx, containerID, resources)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) ReopenContainerLog(containerID string) error {
func (in instrumentedRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) error {
const operation = "reopen_container_log"
defer recordOperation(operation, time.Now())

err := in.service.ReopenContainerLog(containerID)
err := in.service.ReopenContainerLog(ctx, containerID)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
func (in instrumentedRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
const operation = "exec_sync"
defer recordOperation(operation, time.Now())

stdout, stderr, err := in.service.ExecSync(containerID, cmd, timeout)
stdout, stderr, err := in.service.ExecSync(ctx, containerID, cmd, timeout)
recordError(operation, err)
return stdout, stderr, err
}

func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
func (in instrumentedRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
const operation = "exec"
defer recordOperation(operation, time.Now())

resp, err := in.service.Exec(req)
resp, err := in.service.Exec(ctx, req)
recordError(operation, err)
return resp, err
}

func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
func (in instrumentedRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach"
defer recordOperation(operation, time.Now())

resp, err := in.service.Attach(req)
resp, err := in.service.Attach(ctx, req)
recordError(operation, err)
return resp, err
}

func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
func (in instrumentedRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
const operation = "run_podsandbox"
startTime := time.Now()
defer recordOperation(operation, startTime)
defer metrics.RunPodSandboxDuration.WithLabelValues(runtimeHandler).Observe(metrics.SinceInSeconds(startTime))

out, err := in.service.RunPodSandbox(config, runtimeHandler)
out, err := in.service.RunPodSandbox(ctx, config, runtimeHandler)
recordError(operation, err)
if err != nil {
metrics.RunPodSandboxErrors.WithLabelValues(runtimeHandler).Inc()
@ -190,146 +191,146 @@ func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandbox
return out, err
}

func (in instrumentedRuntimeService) StopPodSandbox(podSandboxID string) error {
func (in instrumentedRuntimeService) StopPodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "stop_podsandbox"
defer recordOperation(operation, time.Now())

err := in.service.StopPodSandbox(podSandboxID)
err := in.service.StopPodSandbox(ctx, podSandboxID)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error {
func (in instrumentedRuntimeService) RemovePodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "remove_podsandbox"
defer recordOperation(operation, time.Now())

err := in.service.RemovePodSandbox(podSandboxID)
err := in.service.RemovePodSandbox(ctx, podSandboxID)
recordError(operation, err)
return err
}

func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
func (in instrumentedRuntimeService) PodSandboxStatus(ctx context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
const operation = "podsandbox_status"
defer recordOperation(operation, time.Now())

out, err := in.service.PodSandboxStatus(podSandboxID, verbose)
out, err := in.service.PodSandboxStatus(ctx, podSandboxID, verbose)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
func (in instrumentedRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox"
defer recordOperation(operation, time.Now())

out, err := in.service.ListPodSandbox(filter)
out, err := in.service.ListPodSandbox(ctx, filter)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
func (in instrumentedRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {
const operation = "container_stats"
defer recordOperation(operation, time.Now())

out, err := in.service.ContainerStats(containerID)
out, err := in.service.ContainerStats(ctx, containerID)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
func (in instrumentedRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
const operation = "list_container_stats"
defer recordOperation(operation, time.Now())

out, err := in.service.ListContainerStats(filter)
out, err := in.service.ListContainerStats(ctx, filter)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
func (in instrumentedRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
const operation = "podsandbox_stats"
defer recordOperation(operation, time.Now())

out, err := in.service.PodSandboxStats(podSandboxID)
out, err := in.service.PodSandboxStats(ctx, podSandboxID)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
func (in instrumentedRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
const operation = "list_podsandbox_stats"
defer recordOperation(operation, time.Now())

out, err := in.service.ListPodSandboxStats(filter)
out, err := in.service.ListPodSandboxStats(ctx, filter)
recordError(operation, err)
return out, err
}

func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
func (in instrumentedRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward"
defer recordOperation(operation, time.Now())

resp, err := in.service.PortForward(req)
resp, err := in.service.PortForward(ctx, req)
recordError(operation, err)
return resp, err
}

func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
func (in instrumentedRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config"
defer recordOperation(operation, time.Now())

err := in.service.UpdateRuntimeConfig(runtimeConfig)
err := in.service.UpdateRuntimeConfig(ctx, runtimeConfig)
recordError(operation, err)
return err
}

func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
func (in instrumentedImageManagerService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images"
defer recordOperation(operation, time.Now())

out, err := in.service.ListImages(filter)
out, err := in.service.ListImages(ctx, filter)
recordError(operation, err)
return out, err
}

func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
func (in instrumentedImageManagerService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
const operation = "image_status"
defer recordOperation(operation, time.Now())

out, err := in.service.ImageStatus(image, verbose)
out, err := in.service.ImageStatus(ctx, image, verbose)
recordError(operation, err)
return out, err
}

func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (in instrumentedImageManagerService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "pull_image"
defer recordOperation(operation, time.Now())

imageRef, err := in.service.PullImage(image, auth, podSandboxConfig)
imageRef, err := in.service.PullImage(ctx, image, auth, podSandboxConfig)
recordError(operation, err)
return imageRef, err
}

func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error {
func (in instrumentedImageManagerService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error {
const operation = "remove_image"
defer recordOperation(operation, time.Now())

err := in.service.RemoveImage(image)
err := in.service.RemoveImage(ctx, image)
recordError(operation, err)
return err
}

func (in instrumentedImageManagerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) {
const operation = "image_fs_info"
defer recordOperation(operation, time.Now())

fsInfo, err := in.service.ImageFsInfo()
fsInfo, err := in.service.ImageFsInfo(ctx)
recordError(operation, err)
return fsInfo, nil
}

func (in instrumentedRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error {
func (in instrumentedRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
const operation = "checkpoint_container"
defer recordOperation(operation, time.Now())

err := in.service.CheckpointContainer(options)
err := in.service.CheckpointContainer(ctx, options)
recordError(operation, err)
return err
}

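Every wrapper in this file follows the same shape: time the operation, count any error, and delegate to the wrapped service with the caller's context. A self-contained sketch of that decorator pattern, with an illustrative Runtime interface and observe callback standing in for the kubelet's real interfaces and metrics:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Runtime is a hypothetical two-method slice of a CRI-like API.
type Runtime interface {
	StartContainer(ctx context.Context, id string) error
	StopContainer(ctx context.Context, id string, timeout int64) error
}

// instrumented decorates a Runtime, timing each call and reporting errors,
// while passing the caller's context straight through to the wrapped service.
type instrumented struct {
	service Runtime
	observe func(op string, start time.Time, err error)
}

func (in instrumented) StartContainer(ctx context.Context, id string) error {
	start := time.Now()
	err := in.service.StartContainer(ctx, id)
	in.observe("start_container", start, err)
	return err
}

func (in instrumented) StopContainer(ctx context.Context, id string, timeout int64) error {
	start := time.Now()
	err := in.service.StopContainer(ctx, id, timeout)
	in.observe("stop_container", start, err)
	return err
}

// noopRuntime makes the sketch runnable without a real container runtime.
type noopRuntime struct{}

func (noopRuntime) StartContainer(ctx context.Context, id string) error         { return ctx.Err() }
func (noopRuntime) StopContainer(ctx context.Context, id string, t int64) error { return ctx.Err() }

func main() {
	rt := instrumented{
		service: noopRuntime{},
		observe: func(op string, start time.Time, err error) {
			fmt.Printf("%s took %s, err=%v\n", op, time.Since(start), err)
		},
	}
	_ = rt.StartContainer(context.Background(), "abc")
}
```

Because instrumentation lives in one wrapper type, adding the context parameter here is enough for every instrumented CRI call to pick it up.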
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"net"
"net/http"
"testing"
@ -70,14 +71,16 @@ func TestRecordOperation(t *testing.T) {
}

func TestInstrumentedVersion(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, _, _ := createTestRuntimeManager()
irs := newInstrumentedRuntimeService(fakeRuntime)
vr, err := irs.Version("1")
vr, err := irs.Version(ctx, "1")
assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, vr.Version)
}

func TestStatus(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, _, _ := createTestRuntimeManager()
fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{
@ -86,7 +89,7 @@ func TestStatus(t *testing.T) {
},
}
irs := newInstrumentedRuntimeService(fakeRuntime)
actural, err := irs.Status(false)
actural, err := irs.Status(ctx, false)
assert.NoError(t, err)
expected := &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{

@ -170,11 +170,11 @@ func calcRestartCountByLogDir(path string) (int, error) {
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
container := spec.container

// Step 1: pull the image.
imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig)
imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, pod, container, pullSecrets, podSandboxConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
@ -212,7 +212,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
return s.Message(), ErrCreateContainerConfig
}

containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs, target)
containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target)
if cleanupAction != nil {
defer cleanupAction()
}
@ -229,7 +229,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
return s.Message(), ErrPreCreateHook
}

containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig)
containerID, err := m.runtimeService.CreateContainer(ctx, podSandboxID, containerConfig, podSandboxConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
@ -244,7 +244,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))

// Step 3: start the container.
err = m.runtimeService.StartContainer(containerID)
err = m.runtimeService.StartContainer(ctx, containerID)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
@ -277,13 +277,13 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
Type: m.runtimeName,
ID: containerID,
}
msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart)
msg, handlerErr := m.runner.Run(ctx, kubeContainerID, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
klog.ErrorS(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
// do not record the message in the event so that secrets won't leak from the server.
m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed")
if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil {
if err := m.killContainer(ctx, pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil {
klog.ErrorS(err, "Failed to kill container", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
}
@ -295,13 +295,13 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
}

// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs)
func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs)
if err != nil {
return nil, nil, err
}

uid, username, err := m.getImageUser(container.Image)
uid, username, err := m.getImageUser(ctx, container.Image)
if err != nil {
return nil, cleanupAction, err
}
@ -432,7 +432,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
// getKubeletContainers lists containers managed by kubelet.
// The boolean parameter specifies whether returns all containers including
// those already exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) {
func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, allContainers bool) ([]*runtimeapi.Container, error) {
filter := &runtimeapi.ContainerFilter{}
if !allContainers {
filter.State = &runtimeapi.ContainerStateValue{
@ -440,7 +440,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
}
}

containers, err := m.runtimeService.ListContainers(filter)
containers, err := m.runtimeService.ListContainers(ctx, filter)
if err != nil {
klog.ErrorS(err, "ListContainers failed")
return nil, err
@ -491,9 +491,9 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string)
}

// getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
// Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{
containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
})
if err != nil {
@ -504,7 +504,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
statuses := []*kubecontainer.Status{}
// TODO: optimization: set maximum number of containers per container name to examine.
for _, c := range containers {
resp, err := m.runtimeService.ContainerStatus(c.Id, false)
resp, err := m.runtimeService.ContainerStatus(ctx, c.Id, false)
// Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal.
// The previous call (ListContainers) never fails due to a pod container not existing.
// Therefore, this method should not either, but instead act as if the previous call failed,
@ -579,7 +579,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin
}

// executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
klog.V(3).InfoS("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String())

start := metav1.Now()
@ -587,7 +587,7 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
go func() {
defer close(done)
defer utilruntime.HandleCrash()
if _, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String())
// do not record the message in the event so that secrets won't leak from the server.
@ -615,10 +615,10 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
// TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can
// just pass the needed function not create the fake object.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.Context, containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
var pod *v1.Pod
var container *v1.Container
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false)
resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil {
return nil, nil, err
}
@ -658,7 +658,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
// killContainer kills a container through the following steps:
// * Run the pre-stop lifecycle hooks (if applicable).
// * Stop the container.
func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error {
func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error {
var containerSpec *v1.Container
if pod != nil {
if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil {
@ -667,7 +667,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
}
} else {
// Restore necessary information if one of the specs is nil.
restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(containerID)
restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(ctx, containerID)
if err != nil {
return err
}
@ -689,7 +689,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec

// Run the pre-stop lifecycle hooks if applicable and if there is enough time to run it
if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 {
gracePeriod = gracePeriod - m.executePreStopHook(pod, containerID, containerSpec, gracePeriod)
gracePeriod = gracePeriod - m.executePreStopHook(ctx, pod, containerID, containerSpec, gracePeriod)
}
// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
if gracePeriod < minimumGracePeriodInSeconds {
@ -704,7 +704,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
klog.V(2).InfoS("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)

err := m.runtimeService.StopContainer(containerID.ID, gracePeriod)
err := m.runtimeService.StopContainer(ctx, containerID.ID, gracePeriod)
if err != nil && !crierror.IsNotFound(err) {
klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
@ -717,7 +717,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
}

// killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
wg := sync.WaitGroup{}

@ -728,7 +728,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
defer wg.Done()

killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
if err := m.killContainer(pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil {
if err := m.killContainer(ctx, pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
// Use runningPod for logging as the pod passed in could be *nil*.
klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
@ -750,7 +750,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
// containers, we have reduced the number of outstanding init containers still
// present. This reduces load on the container garbage collector by only
// preserving the most recent terminated init container.
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
// only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep.
initContainerNames := sets.NewString()
@ -775,7 +775,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
}
// prune all other init containers that match this container name
klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(status.ID.ID); err != nil {
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue
}
@ -786,7 +786,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
// Remove all init containers. Note that this function does not check the state
// of the container because it assumes all init containers have been stopped
// before the call happens.
func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
initContainerNames := sets.NewString()
for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name)
@ -800,7 +800,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
count++
// Purge all init containers that match this container name
klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(status.ID.ID); err != nil {
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue
}
@ -867,7 +867,7 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus)

// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false)
resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil {
klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err)
return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
@ -880,7 +880,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v
}

// GetExec gets the endpoint the runtime will serve the exec request from.
func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.ExecRequest{
ContainerId: id.ID,
Cmd: cmd,
@ -889,7 +889,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
Stdout: stdout,
Stderr: stderr,
}
resp, err := m.runtimeService.Exec(req)
resp, err := m.runtimeService.Exec(ctx, req)
if err != nil {
return nil, err
}
@ -898,7 +898,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
}

// GetAttach gets the endpoint the runtime will serve the attach request from.
func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.AttachRequest{
ContainerId: id.ID,
Stdin: stdin,
@ -906,7 +906,7 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi
Stderr: stderr,
Tty: tty,
}
resp, err := m.runtimeService.Attach(req)
resp, err := m.runtimeService.Attach(ctx, req)
if err != nil {
return nil, err
}
@ -914,8 +914,8 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi
}

// RunInContainer synchronously executes the command in the container, and returns the output.
func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
stdout, stderr, err := m.runtimeService.ExecSync(id.ID, cmd, timeout)
func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
stdout, stderr, err := m.runtimeService.ExecSync(ctx, id.ID, cmd, timeout)
// NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient
// for logging purposes. A combined output option will need to be added to the ExecSyncRequest
// if more precise output ordering is ever required.
@ -928,7 +928,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID,
// that container logs to be removed with the container.
// Notice that we assume that the container should only be removed in non-running state, and
// it will not write container logs anymore in that state.
func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error {
func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, containerID string) error {
klog.V(4).InfoS("Removing container", "containerID", containerID)
// Call internal container post-stop lifecycle hook.
if err := m.internalLifecycle.PostStopContainer(containerID); err != nil {
@ -937,22 +937,22 @@ func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error {

// Remove the container log.
// TODO: Separate log and container lifecycle management.
if err := m.removeContainerLog(containerID); err != nil {
if err := m.removeContainerLog(ctx, containerID); err != nil {
return err
}
// Remove the container.
return m.runtimeService.RemoveContainer(containerID)
return m.runtimeService.RemoveContainer(ctx, containerID)
}

// removeContainerLog removes the container log.
func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error {
func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, containerID string) error {
// Use log manager to remove rotated logs.
err := m.logManager.Clean(containerID)
err := m.logManager.Clean(ctx, containerID)
if err != nil {
return err
}

resp, err := m.runtimeService.ContainerStatus(containerID, false)
resp, err := m.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
@ -973,8 +973,8 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
}

// DeleteContainer removes a container.
func (m *kubeGenericRuntimeManager) DeleteContainer(containerID kubecontainer.ContainerID) error {
return m.removeContainer(containerID.ID)
func (m *kubeGenericRuntimeManager) DeleteContainer(ctx context.Context, containerID kubecontainer.ContainerID) error {
return m.removeContainer(ctx, containerID.ID)
}

// setTerminationGracePeriod determines the grace period to use when killing a container

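killContainer above trims the grace period by the time the preStop hook consumed and then enforces a minimal shutdown window before calling StopContainer. A small sketch of just that arithmetic; the constant value here is illustrative, and the kubelet's real logic also handles overrides and deletion timestamps:

```go
package main

import "fmt"

// minimumGracePeriodSeconds mirrors the idea of a minimal shutdown window that
// avoids an immediate SIGKILL; the real constant lives in the kubelet.
const minimumGracePeriodSeconds int64 = 2

// effectiveGracePeriod subtracts the time already spent running the preStop
// hook from the configured grace period and enforces the minimum floor.
func effectiveGracePeriod(configured, preStopSpent int64) int64 {
	grace := configured - preStopSpent
	if grace < minimumGracePeriodSeconds {
		grace = minimumGracePeriodSeconds
	}
	return grace
}

func main() {
	fmt.Println(effectiveGracePeriod(30, 25)) // 5
	fmt.Println(effectiveGracePeriod(30, 29)) // floored to 2
}
```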
@ -20,6 +20,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"reflect"
"strconv"
"testing"
@ -39,10 +40,11 @@ import (
)

func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig {
ctx := context.Background()
container := &pod.Spec.Containers[containerIndex]
podIP := ""
restartCount := 0
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, []string{podIP})
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, []string{podIP})
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount)
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
@ -73,6 +75,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
}

func TestGenerateContainerConfig(t *testing.T) {
ctx := context.Background()
_, imageService, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -102,7 +105,7 @@ func TestGenerateContainerConfig(t *testing.T) {
}

expectedConfig := makeExpectedConfig(m, pod, 0, false)
containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil)
containerConfig, _, err := m.generateContainerConfig(ctx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil)
assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
@ -133,11 +136,11 @@ func TestGenerateContainerConfig(t *testing.T) {
},
}

_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
assert.Error(t, err)

imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
resp, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}, false)
imageID, _ := imageService.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
resp, _ := imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: imageID}, false)

resp.Image.Uid = nil
resp.Image.Username = "test"
@ -145,7 +148,7 @@ func TestGenerateContainerConfig(t *testing.T) {
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue

_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
}

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"os"
"path/filepath"
"regexp"
@ -43,6 +44,7 @@ import (

// TestRemoveContainer tests removing the container and its corresponding container logs.
func TestRemoveContainer(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
pod := &v1.Pod{
@ -80,7 +82,7 @@ func TestRemoveContainer(t *testing.T) {
fakeOS.Create(expectedContainerLogPath)
fakeOS.Create(expectedContainerLogPathRotated)

err = m.removeContainer(containerID)
err = m.removeContainer(ctx, containerID)
assert.NoError(t, err)

// Verify container log is removed.
@ -90,7 +92,7 @@ func TestRemoveContainer(t *testing.T) {
fakeOS.Removes)
// Verify container is removed
assert.Contains(t, fakeRuntime.Called, "RemoveContainer")
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID})
containers, err := fakeRuntime.ListContainers(ctx, &runtimeapi.ContainerFilter{Id: containerID})
assert.NoError(t, err)
assert.Empty(t, containers)
}
@ -123,7 +125,8 @@ func TestKillContainer(t *testing.T) {
}

for _, test := range tests {
err := m.killContainer(test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride)
ctx := context.Background()
err := m.killContainer(ctx, test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride)
if test.succeed != (err == nil) {
t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err)
}
@ -303,8 +306,9 @@ func TestLifeCycleHook(t *testing.T) {

// Configured and works as expected
t.Run("PreStop-CMDExec", func(t *testing.T) {
ctx := context.Background()
testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod)
m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] {
t.Errorf("CMD Prestop hook was not invoked")
}
@ -313,21 +317,23 @@ func TestLifeCycleHook(t *testing.T) {
// Configured and working HTTP hook
t.Run("PreStop-HTTPGet", func(t *testing.T) {
t.Run("inconsistent", func(t *testing.T) {
ctx := context.Background()
defer func() { fakeHTTP.req = nil }()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{}
testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod)
m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)

if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
t.Errorf("HTTP Prestop hook was not invoked")
}
})
t.Run("consistent", func(t *testing.T) {
ctx := context.Background()
defer func() { fakeHTTP.req = nil }()
httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt(80)
testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod)
m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)

if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
t.Errorf("HTTP Prestop hook was not invoked")
@ -337,12 +343,13 @@ func TestLifeCycleHook(t *testing.T) {

// When there is no time to run PreStopHook
t.Run("PreStop-NoTimeToRun", func(t *testing.T) {
ctx := context.Background()
gracePeriodLocal := int64(0)

testPod.DeletionGracePeriodSeconds = &gracePeriodLocal
testPod.Spec.TerminationGracePeriodSeconds = &gracePeriodLocal

m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriodLocal)
m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriodLocal)

if fakeHTTP.req != nil {
t.Errorf("HTTP Prestop hook Should not execute when gracePeriod is 0")
@ -351,7 +358,7 @@ func TestLifeCycleHook(t *testing.T) {

// Post Start script
t.Run("PostStart-CmdExe", func(t *testing.T) {

ctx := context.Background()
// Fake all the things you need before trying to create a container
fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod)
fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0)
@ -372,7 +379,7 @@ func TestLifeCycleHook(t *testing.T) {
}

// Now try to create a container, which should in turn invoke PostStart Hook
_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{})
_, err := m.startContainer(ctx, fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{})
if err != nil {
t.Errorf("startContainer error =%v", err)
}

||||
|
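The test hunks above all follow the same pattern: create a context per case and thread it into killContainer/startContainer. A minimal sketch of that pattern outside the diff, assuming it sits in package kuberuntime with the imports already shown in this file; the helper name killForTest, the grace period, and the message are hypothetical, not part of this commit:

// Hypothetical helper, not part of this commit: bound a manual kill with the
// caller's context so the CRI StopContainer call it triggers can be cancelled.
func killForTest(ctx context.Context, m *kubeGenericRuntimeManager, pod *v1.Pod, id kubecontainer.ContainerID, name string) error {
	grace := int64(30)
	// Empty reason mirrors the test calls above; "manual kill" is the event message.
	return m.killContainer(ctx, pod, id, name, "manual kill", "", &grace)
}
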
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"os"
"path/filepath"
@ -111,18 +112,18 @@ func (a sandboxByCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sandboxByCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }

// enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit.
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) {
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(ctx context.Context, evictUnits containersByEvictUnit, MaxContainers int) {
for key := range evictUnits {
toRemove := len(evictUnits[key]) - MaxContainers

if toRemove > 0 {
evictUnits[key] = cgc.removeOldestN(evictUnits[key], toRemove)
evictUnits[key] = cgc.removeOldestN(ctx, evictUnits[key], toRemove)
}
}
}

// removeOldestN removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo {
func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containerGCInfo, toRemove int) []containerGCInfo {
// Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove
if numToKeep > 0 {
@ -137,12 +138,12 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int
ID: containers[i].id,
}
message := "Container is in unknown state, try killing it before removal"
if err := cgc.manager.killContainer(nil, id, containers[i].name, message, reasonUnknown, nil); err != nil {
if err := cgc.manager.killContainer(ctx, nil, id, containers[i].name, message, reasonUnknown, nil); err != nil {
klog.ErrorS(err, "Failed to stop container", "containerID", containers[i].id)
continue
}
}
if err := cgc.manager.removeContainer(containers[i].id); err != nil {
if err := cgc.manager.removeContainer(ctx, containers[i].id); err != nil {
klog.ErrorS(err, "Failed to remove container", "containerID", containers[i].id)
}
}
@ -153,7 +154,7 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int

// removeOldestNSandboxes removes the oldest inactive toRemove sandboxes and
// returns the resulting slice.
func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemove int) {
func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes []sandboxGCInfo, toRemove int) {
numToKeep := len(sandboxes) - toRemove
if numToKeep > 0 {
sort.Sort(sandboxByCreated(sandboxes))
@ -161,30 +162,30 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo
// Remove from oldest to newest (last to first).
for i := len(sandboxes) - 1; i >= numToKeep; i-- {
if !sandboxes[i].active {
cgc.removeSandbox(sandboxes[i].id)
cgc.removeSandbox(ctx, sandboxes[i].id)
}
}
}

// removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(sandboxID string) {
func (cgc *containerGC) removeSandbox(ctx context.Context, sandboxID string) {
klog.V(4).InfoS("Removing sandbox", "sandboxID", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(sandboxID); err != nil {
if err := cgc.client.StopPodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID)
return
}
if err := cgc.client.RemovePodSandbox(sandboxID); err != nil {
if err := cgc.client.RemovePodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to remove sandbox", "sandboxID", sandboxID)
}
}

// evictableContainers gets all containers that are evictable. Evictable containers are: not running
// and created more than MinAge ago.
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, error) {
containers, err := cgc.manager.getKubeletContainers(true)
func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Duration) (containersByEvictUnit, error) {
containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil {
return containersByEvictUnit{}, err
}
@ -220,9 +221,9 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
}

// evict all containers that are evictable
func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
// Separate containers by evict units.
evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge)
evictUnits, err := cgc.evictableContainers(ctx, gcPolicy.MinAge)
if err != nil {
return err
}
@ -231,7 +232,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
if allSourcesReady {
for key, unit := range evictUnits {
if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) {
cgc.removeOldestN(unit, len(unit)) // Remove all.
cgc.removeOldestN(ctx, unit, len(unit)) // Remove all.
delete(evictUnits, key)
}
}
@ -239,7 +240,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour

// Enforce max containers per evict unit.
if gcPolicy.MaxPerPodContainer >= 0 {
cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer)
cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, gcPolicy.MaxPerPodContainer)
}

// Enforce max total number of containers.
@ -249,7 +250,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
if numContainersPerEvictUnit < 1 {
numContainersPerEvictUnit = 1
}
cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit)
cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, numContainersPerEvictUnit)

// If we still need to evict, evict oldest first.
numContainers := evictUnits.NumContainers()
@ -260,7 +261,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
}
sort.Sort(byCreated(flattened))

cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers)
cgc.removeOldestN(ctx, flattened, numContainers-gcPolicy.MaxContainers)
}
}
return nil
@ -272,13 +273,13 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour
// 2. contains no containers.
// 3. belong to a non-existent (i.e., already removed) pod, or is not the
// most recently created sandbox for the pod.
func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {
containers, err := cgc.manager.getKubeletContainers(true)
func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods bool) error {
containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil {
return err
}

sandboxes, err := cgc.manager.getKubeletSandboxes(true)
sandboxes, err := cgc.manager.getKubeletSandboxes(ctx, true)
if err != nil {
return err
}
@ -315,10 +316,10 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {
// Remove all evictable sandboxes if the pod has been removed.
// Note that the latest dead sandbox is also removed if there is
// already an active one.
cgc.removeOldestNSandboxes(sandboxes, len(sandboxes))
cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes))
} else {
// Keep latest one if the pod still exists.
cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)-1)
cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes)-1)
}
}
return nil
@ -326,7 +327,7 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {

// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesReady bool) error {
osInterface := cgc.manager.osInterface
if allSourcesReady {
// Only remove pod logs directories when all sources are ready.
@ -354,7 +355,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
for _, logSymlink := range logSymlinks {
if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil {
resp, err := cgc.manager.runtimeService.ContainerStatus(containerID, false)
resp, err := cgc.manager.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
// TODO: we should handle container not found (i.e. container was deleted) case differently
// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
@ -405,20 +406,20 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
// * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
// * gets evictable sandboxes which are not ready and contains no containers.
// * removes evictable sandboxes.
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
func (cgc *containerGC) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
errors := []error{}
// Remove evictable containers
if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil {
if err := cgc.evictContainers(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil {
errors = append(errors, err)
}

// Remove sandboxes with zero containers
if err := cgc.evictSandboxes(evictNonDeletedPods); err != nil {
if err := cgc.evictSandboxes(ctx, evictNonDeletedPods); err != nil {
errors = append(errors, err)
}

// Remove pod sandbox log directory
if err := cgc.evictPodLogsDirectories(allSourcesReady); err != nil {
if err := cgc.evictPodLogsDirectories(ctx, allSourcesReady); err != nil {
errors = append(errors, err)
}
return utilerrors.NewAggregate(errors)

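With the signatures above, one GC pass can now be bounded by the caller instead of running unbounded. A sketch only, assuming it lives in package kuberuntime beside the code above (which already imports "context" and "time"); the function name and the two-minute deadline are illustrative, not a kubelet default:

// Illustrative wrapper (not in the commit): run one GC pass with a deadline so
// the underlying CRI list/stop/remove calls are cancelled if the runtime hangs.
func garbageCollectOnce(cgc *containerGC, policy kubecontainer.GCPolicy) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	// allSourcesReady=true, evictNonDeletedPods=false mirror the usual kubelet call site.
	return cgc.GarbageCollect(ctx, policy, true, false)
}
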
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"os"
"path/filepath"
"testing"
@ -160,6 +161,7 @@ func TestSandboxGC(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{})
fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes)
@ -175,13 +177,13 @@ func TestSandboxGC(t *testing.T) {
fakeRuntime.SetFakeSandboxes(fakeSandboxes)
fakeRuntime.SetFakeContainers(fakeContainers)

err := m.containerGC.evictSandboxes(test.evictTerminatingPods)
err := m.containerGC.evictSandboxes(ctx, test.evictTerminatingPods)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListPodSandbox(nil)
realRemain, err := fakeRuntime.ListPodSandbox(ctx, nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
resp, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id, false)
resp, err := fakeRuntime.PodSandboxStatus(ctx, fakeSandboxes[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status)
}
@ -387,6 +389,7 @@ func TestContainerGC(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{})
fakeContainers := makeFakeContainers(t, m, test.containers)
@ -403,13 +406,13 @@ func TestContainerGC(t *testing.T) {
if test.policy == nil {
test.policy = &defaultGCPolicy
}
err := m.containerGC.evictContainers(*test.policy, test.allSourcesReady, test.evictTerminatingPods)
err := m.containerGC.evictContainers(ctx, *test.policy, test.allSourcesReady, test.evictTerminatingPods)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListContainers(nil)
realRemain, err := fakeRuntime.ListContainers(ctx, nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
resp, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id, false)
resp, err := fakeRuntime.ContainerStatus(ctx, fakeContainers[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, resp.Status)
}
@ -419,6 +422,7 @@ func TestContainerGC(t *testing.T) {

// Notice that legacy container symlink is not tested since it may be deprecated soon.
func TestPodLogDirectoryGC(t *testing.T) {
ctx := context.Background()
_, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
fakeOS := m.osInterface.(*containertest.FakeOS)
@ -449,18 +453,19 @@ func TestPodLogDirectoryGC(t *testing.T) {
}

// allSourcesReady == true, pod log directories without corresponding pod should be removed.
err = m.containerGC.evictPodLogsDirectories(true)
err = m.containerGC.evictPodLogsDirectories(ctx, true)
assert.NoError(t, err)
assert.Equal(t, removed, fakeOS.Removes)

// allSourcesReady == false, pod log directories should not be removed.
fakeOS.Removes = []string{}
err = m.containerGC.evictPodLogsDirectories(false)
err = m.containerGC.evictPodLogsDirectories(ctx, false)
assert.NoError(t, err)
assert.Empty(t, fakeOS.Removes)
}

func TestUnknownStateContainerGC(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -472,13 +477,13 @@ func TestUnknownStateContainerGC(t *testing.T) {
})
fakeRuntime.SetFakeContainers(fakeContainers)

err = m.containerGC.evictContainers(defaultGCPolicy, true, false)
err = m.containerGC.evictContainers(ctx, defaultGCPolicy, true, false)
assert.NoError(t, err)

assert.Contains(t, fakeRuntime.GetCalls(), "StopContainer", "RemoveContainer",
"container in unknown state should be stopped before being removed")

remain, err := fakeRuntime.ListContainers(nil)
remain, err := fakeRuntime.ListContainers(ctx, nil)
assert.NoError(t, err)
assert.Empty(t, remain)
}

@ -17,6 +17,8 @@ limitations under the License.
package kuberuntime

import (
"context"

v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
@ -28,7 +30,7 @@ import (

// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
img := image.Image
repoToPull, _, _, err := parsers.ParseImageName(img)
if err != nil {
@ -46,7 +48,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
if !withCredentials {
klog.V(3).InfoS("Pulling image without credentials", "image", img)

imageRef, err := m.imageService.PullImage(imgSpec, nil, podSandboxConfig)
imageRef, err := m.imageService.PullImage(ctx, imgSpec, nil, podSandboxConfig)
if err != nil {
klog.ErrorS(err, "Failed to pull image", "image", img)
return "", err
@ -66,7 +68,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
RegistryToken: currentCreds.RegistryToken,
}

imageRef, err := m.imageService.PullImage(imgSpec, auth, podSandboxConfig)
imageRef, err := m.imageService.PullImage(ctx, imgSpec, auth, podSandboxConfig)
// If there was no error, return success
if err == nil {
return imageRef, nil
@ -80,8 +82,8 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul

// GetImageRef gets the ID of the image which has already been in
// the local storage. It returns ("", nil) if the image isn't in the local storage.
func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
resp, err := m.imageService.ImageStatus(toRuntimeAPIImageSpec(image), false)
func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubecontainer.ImageSpec) (string, error) {
resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
if err != nil {
klog.ErrorS(err, "Failed to get image status", "image", image.Image)
return "", err
@ -93,10 +95,10 @@ func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (
}

// ListImages gets all images currently on the machine.
func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) {
func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) {
var images []kubecontainer.Image

allImages, err := m.imageService.ListImages(nil)
allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil {
klog.ErrorS(err, "Failed to list images")
return nil, err
@ -116,8 +118,8 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
}

// RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image})
func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(ctx, &runtimeapi.ImageSpec{Image: image.Image})
if err != nil {
klog.ErrorS(err, "Failed to remove image", "image", image.Image)
return err
@ -130,8 +132,8 @@ func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) e
// Notice that current logic doesn't really work for images which share layers (e.g. docker image),
// this is a known issue, and we'll address this by getting imagefs stats directly from CRI.
// TODO: Get imagefs stats directly from CRI.
func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) {
allImages, err := m.imageService.ListImages(nil)
func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontainer.ImageStats, error) {
allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil {
klog.ErrorS(err, "Failed to list images")
return nil, err

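Because PullImage now takes a context, a caller can put a per-pull timeout around it. A sketch under the assumption that it sits next to the code above in package kuberuntime and adds a "time" import; the helper name, the ten-minute limit, and the nil secrets/sandbox config are illustrative only:

// Hypothetical helper, not part of this commit: pull one image with a deadline
// so a stuck registry cannot block the caller indefinitely.
func pullWithTimeout(m *kubeGenericRuntimeManager, spec kubecontainer.ImageSpec) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	// nil secrets and nil sandbox config take the credential-less path shown above.
	return m.PullImage(ctx, spec, nil, nil)
}
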
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"encoding/json"
"fmt"
"testing"
@ -32,34 +33,37 @@ import (
)

func TestPullImage(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, "busybox", imageRef)

images, err := fakeManager.ListImages()
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(images))
assert.Equal(t, images[0].RepoTags, []string{"busybox"})
}

func TestPullImageWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

fakeImageService.InjectError("PullImage", fmt.Errorf("test-error"))
imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.Error(t, err)
assert.Equal(t, "", imageRef)

images, err := fakeManager.ListImages()
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 0, len(images))
}

func TestListImages(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -67,7 +71,7 @@ func TestListImages(t *testing.T) {
expected := sets.NewString(images...)
fakeImageService.SetFakeImages(images)

actualImages, err := fakeManager.ListImages()
actualImages, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
actual := sets.NewString()
for _, i := range actualImages {
@ -78,34 +82,37 @@ func TestListImages(t *testing.T) {
}

func TestListImagesWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))

actualImages, err := fakeManager.ListImages()
actualImages, err := fakeManager.ListImages(ctx)
assert.Error(t, err)
assert.Nil(t, actualImages)
}

func TestGetImageRef(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

image := "busybox"
fakeImageService.SetFakeImages([]string{image})
imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image})
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.NoError(t, err)
assert.Equal(t, image, imageRef)
}

func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

image := "busybox"

imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image})
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.NoError(t, err)

imageNotAvailableLocallyRef := ""
@ -113,6 +120,7 @@ func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
}

func TestGetImageRefWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -120,48 +128,52 @@ func TestGetImageRefWithError(t *testing.T) {

fakeImageService.InjectError("ImageStatus", fmt.Errorf("test-error"))

imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image})
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
assert.Error(t, err)
assert.Equal(t, "", imageRef)
}

func TestRemoveImage(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
_, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))

err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"})
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err)
assert.Equal(t, 0, len(fakeImageService.Images))
}

func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"})
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err)
}

func TestRemoveImageWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
_, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))

fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure"))

err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"})
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.Error(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))
}

func TestImageStats(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -170,24 +182,26 @@ func TestImageStats(t *testing.T) {
images := []string{"1111", "2222", "3333"}
fakeImageService.SetFakeImages(images)

actualStats, err := fakeManager.ImageStats()
actualStats, err := fakeManager.ImageStats(ctx)
assert.NoError(t, err)
expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))}
assert.Equal(t, expectedStats, actualStats)
}

func TestImageStatsWithError(t *testing.T) {
ctx := context.Background()
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))

actualImageStats, err := fakeManager.ImageStats()
actualImageStats, err := fakeManager.ImageStats(ctx)
assert.Error(t, err)
assert.Nil(t, actualImageStats)
}

func TestPullWithSecrets(t *testing.T) {
ctx := context.Background()
// auth value is equivalent to: "username":"passed-user","password":"passed-password"
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
dockercfgContent, err := json.Marshal(dockerCfg)
@ -252,13 +266,14 @@ func TestPullWithSecrets(t *testing.T) {
_, fakeImageService, fakeManager, err := customTestRuntimeManager(builtInKeyRing)
require.NoError(t, err)

_, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil)
_, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil)
require.NoError(t, err)
fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description)
}
}

func TestPullThenListWithAnnotations(t *testing.T) {
ctx := context.Background()
_, _, fakeManager, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -269,10 +284,10 @@ func TestPullThenListWithAnnotations(t *testing.T) {
},
}

_, err = fakeManager.PullImage(imageSpec, nil, nil)
_, err = fakeManager.PullImage(ctx, imageSpec, nil, nil)
assert.NoError(t, err)

images, err := fakeManager.ListImages()
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(images))
assert.Equal(t, images[0].Spec, imageSpec)

@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"errors"
"fmt"
"os"
@ -196,6 +197,7 @@ func NewKubeGenericRuntimeManager(
memoryThrottlingFactor float64,
podPullingTimeRecorder images.ImagePodPullingTimeRecorder,
) (KubeGenericRuntime, error) {
ctx := context.Background()
runtimeService = newInstrumentedRuntimeService(runtimeService)
imageService = newInstrumentedImageManagerService(imageService)
kubeRuntimeManager := &kubeGenericRuntimeManager{
@ -221,7 +223,7 @@ func NewKubeGenericRuntimeManager(
memoryThrottlingFactor: memoryThrottlingFactor,
}

typedVersion, err := kubeRuntimeManager.getTypedVersion()
typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx)
if err != nil {
klog.ErrorS(err, "Get runtime version failed")
return nil, err
@ -273,7 +275,7 @@ func NewKubeGenericRuntimeManager(

kubeRuntimeManager.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return kubeRuntimeManager.getTypedVersion()
return kubeRuntimeManager.getTypedVersion(ctx)
},
versionCacheTTL,
)
@ -293,8 +295,8 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) {
return utilversion.ParseGeneric(version)
}

func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil {
return nil, fmt.Errorf("get remote runtime typed version failed: %v", err)
}
@ -302,8 +304,8 @@ func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionRespon
}

// Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
typedVersion, err := m.getTypedVersion()
func (m *kubeGenericRuntimeManager) Version(ctx context.Context) (kubecontainer.Version, error) {
typedVersion, err := m.getTypedVersion(ctx)
if err != nil {
return nil, err
}
@ -326,8 +328,8 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)

// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, error) {
resp, err := m.runtimeService.Status(false)
func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.RuntimeStatus, error) {
resp, err := m.runtimeService.Status(ctx, false)
if err != nil {
return nil, err
}
@ -340,9 +342,9 @@ func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, erro
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) {
func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
pods := make(map[kubetypes.UID]*kubecontainer.Pod)
sandboxes, err := m.getKubeletSandboxes(all)
sandboxes, err := m.getKubeletSandboxes(ctx, all)
if err != nil {
return nil, err
}
@ -370,7 +372,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
p.CreatedAt = uint64(s.GetCreatedAt())
}

containers, err := m.getKubeletContainers(all)
containers, err := m.getKubeletContainers(ctx, all)
if err != nil {
return nil, err
}
@ -671,7 +673,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
// 5. Create ephemeral containers.
// 6. Create init containers.
// 7. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodActions(pod, podStatus)
klog.V(3).InfoS("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
@ -695,7 +697,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
klog.V(4).InfoS("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
}

killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
result.AddPodSyncResult(killResult)
if killResult.Error() != nil {
klog.ErrorS(killResult.Error(), "killPodWithSyncResult failed")
@ -703,7 +705,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
}

if podContainerChanges.CreateSandbox {
m.purgeInitContainers(pod, podStatus)
m.purgeInitContainers(ctx, pod, podStatus)
}
} else {
// Step 3: kill any running containers in this pod which are not to keep.
@ -711,7 +713,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
klog.V(3).InfoS("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
result.AddSyncResult(killContainerResult)
if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil {
if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
klog.ErrorS(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
return
@ -722,7 +724,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// Keep terminated init containers fairly aggressively controlled
// This is an optimization because container removals are typically handled
// by container garbage collector.
m.pruneInitContainersBeforeStart(pod, podStatus)
m.pruneInitContainersBeforeStart(ctx, pod, podStatus)

// We pass the value of the PRIMARY podIP and list of podIPs down to
// generatePodSandboxConfig and generateContainerConfig, which in turn
@ -760,7 +762,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// When runc supports slash as sysctl separator, this function can no longer be used.
sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext)

podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
podSandboxID, msg, err = m.createPodSandbox(ctx, pod, podContainerChanges.Attempt)
if err != nil {
// createPodSandbox can return an error from CNI, CSI,
// or CRI if the Pod has been deleted while the POD is
@ -785,7 +787,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
}
klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))

resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false)
resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
if err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
@ -834,7 +836,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// currently: "container", "init container" or "ephemeral container"
// metricLabel is the label used to describe this type of container in monitoring metrics.
// currently: "container", "init_container" or "ephemeral_container"
start := func(typeName, metricLabel string, spec *startSpec) error {
start := func(ctx context.Context, typeName, metricLabel string, spec *startSpec) error {
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
result.AddSyncResult(startContainerResult)

@ -851,7 +853,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
}
klog.V(4).InfoS("Creating container in pod", "containerType", typeName, "container", spec.container, "pod", klog.KObj(pod))
// NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs.
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
if msg, err := m.startContainer(ctx, podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
// startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are
// useful to cluster administrators to distinguish "server errors" from "user errors".
metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
@ -878,13 +880,13 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// are errors starting an init container. In practice init containers will start first since ephemeral
// containers cannot be specified on pod creation.
for _, idx := range podContainerChanges.EphemeralContainersToStart {
start("ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
}

// Step 6: start the init container.
if container := podContainerChanges.NextInitContainerToStart; container != nil {
// Start the next init container.
if err := start("init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
return
}

@ -894,7 +896,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine

// Step 7: start containers in podContainerChanges.ContainersToStart.
for _, idx := range podContainerChanges.ContainersToStart {
start("container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
}

return
@ -938,15 +940,15 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
return err.Error()
}

// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride)
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
for _, containerResult := range killContainerResults {
result.AddSyncResult(containerResult)
}
@ -956,7 +958,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
result.AddSyncResult(killSandboxResult)
// Stop all sandboxes belongs to same pod
for _, podSandbox := range runningPod.Sandboxes {
if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
}
@ -967,7 +969,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo

// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
// Now we retain restart count of container as a container label. Each time a container
// restarts, pod will read the restart count from the registered dead container, increment
// it to get the new restart count, and then add a label with the new restart count on
@ -981,7 +983,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
// Anyhow, we only promised "best-effort" restart count reporting, we can just ignore
// these limitations now.
// TODO: move this comment to SyncPod.
podSandboxIDs, err := m.getSandboxIDByPodUID(uid, nil)
podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil)
if err != nil {
return nil, err
}
@ -1001,7 +1003,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
podIPs := []string{}
for idx, podSandboxID := range podSandboxIDs {
resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false)
resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
// Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal.
// The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing.
// Therefore, this method should not either, but instead act as if the previous call failed,
@ -1025,7 +1027,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
}

// Get statuses of all containers visible in the pod.
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace)
containerStatuses, err := m.getPodContainerStatuses(ctx, uid, name, namespace)
if err != nil {
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
@ -1045,17 +1047,17 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
}

// GarbageCollect removes dead containers using the specified container gc policy.
func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods)
func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
return m.containerGC.GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
}

// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
// with the podCIDR supplied by the kubelet.
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
// TODO(#35531): do we really want to write a method on this manager for each
// field of the config?
klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR)
return m.runtimeService.UpdateRuntimeConfig(
return m.runtimeService.UpdateRuntimeConfig(ctx,
&runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: podCIDR,
@ -1063,6 +1065,6 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
})
}

func (m *kubeGenericRuntimeManager) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error {
return m.runtimeService.CheckpointContainer(options)
func (m *kubeGenericRuntimeManager) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
return m.runtimeService.CheckpointContainer(ctx, options)
}

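The manager methods above now accept the caller's context rather than creating their own, so cancellation and deadlines propagate down to the PodSandboxStatus/ContainerStatus CRI calls. A sketch of one such caller, assuming it sits in package kuberuntime with a "time" import; the function name and the 30-second deadline are illustrative only:

// Hypothetical caller, not part of this commit.
func podStatusWithDeadline(ctx context.Context, m *kubeGenericRuntimeManager, pod *v1.Pod) (*kubecontainer.PodStatus, error) {
	// Derive a bounded context so a hung runtime cannot stall the status query.
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
}
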
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"path/filepath"
"reflect"
@ -161,10 +162,11 @@ func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates

// makeFakeContainer creates a fake container based on a container template.
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
ctx := context.Background()
sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)

containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil)
containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil)
assert.NoError(t, err, "generateContainerConfig for container template %+v", template)

podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
@ -281,10 +283,11 @@ func TestNewKubeRuntimeManager(t *testing.T) {
}

func TestVersion(t *testing.T) {
ctx := context.Background()
_, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

version, err := m.Version()
version, err := m.Version(ctx)
assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, version.String())
}
@ -298,6 +301,7 @@ func TestContainerRuntimeType(t *testing.T) {
}

func TestGetPodStatus(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -327,7 +331,7 @@ func TestGetPodStatus(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)

podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
assert.Equal(t, pod.UID, podStatus.ID)
assert.Equal(t, pod.Name, podStatus.Name)
@ -336,6 +340,7 @@ func TestGetPodStatus(t *testing.T) {
}

func TestStopContainerWithNotFoundError(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -365,15 +370,16 @@ func TestStopContainerWithNotFoundError(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus)
gracePeriod := int64(1)
err = m.KillPod(pod, p, &gracePeriod)
err = m.KillPod(ctx, pod, p, &gracePeriod)
require.NoError(t, err)
}

func TestGetPodStatusWithNotFoundError(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -403,7 +409,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
// Set fake sandbox and faked containers to fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
require.Equal(t, pod.UID, podStatus.ID)
require.Equal(t, pod.Name, podStatus.Name)
@ -412,6 +418,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
}

func TestGetPods(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -480,7 +487,7 @@ func TestGetPods(t *testing.T) {
},
}

actual, err := m.GetPods(false)
actual, err := m.GetPods(ctx, false)
assert.NoError(t, err)

if !verifyPods(expected, actual) {
@ -489,6 +496,7 @@ func TestGetPods(t *testing.T) {
}

func TestGetPodsSorted(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -506,7 +514,7 @@ func TestGetPodsSorted(t *testing.T) {
}
fakeRuntime.SetFakeSandboxes(fakeSandboxes)

actual, err := m.GetPods(false)
actual, err := m.GetPods(ctx, false)
assert.NoError(t, err)

assert.Len(t, actual, 3)
@ -518,6 +526,7 @@ func TestGetPodsSorted(t *testing.T) {
}

func TestKillPod(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -584,7 +593,7 @@ func TestKillPod(t *testing.T) {
},
}

err = m.KillPod(pod, runningPod, nil)
err = m.KillPod(ctx, pod, runningPod, nil)
assert.NoError(t, err)
assert.Equal(t, 3, len(fakeRuntime.Containers))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
@ -624,7 +633,7 @@ func TestSyncPod(t *testing.T) {
}

backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 2, len(fakeImage.Images))
@ -684,7 +693,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
}

backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls)
for _, sandbox := range fakeRuntime.Sandboxes {
@ -696,6 +705,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
}

func TestPruneInitContainers(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -722,10 +732,10 @@ func TestPruneInitContainers(t *testing.T) {
}
fakes := makeFakeContainers(t, m, templates)
fakeRuntime.SetFakeContainers(fakes)
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)

m.pruneInitContainersBeforeStart(pod, podStatus)
m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %v, got %v", expectedContainers, actual)
@ -733,6 +743,7 @@ func TestPruneInitContainers(t *testing.T) {
}

func TestSyncPodWithInitContainers(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)

@ -770,9 +781,9 @@ func TestSyncPodWithInitContainers(t *testing.T) {
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

// 1. should only create the init container.
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
expected := []*cRecord{
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
@ -780,24 +791,24 func TestSyncPodWithInitContainers(t *testing.T) {
verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")

// 2. should not create app container because init container is still running.
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")

// 3. should create all app containers because init container finished.
// Stop init container instance 0.
sandboxIDs, err := m.getSandboxIDByPodUID(pod.UID, nil)
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil)
require.NoError(t, err)
sandboxID := sandboxIDs[0]
initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.StopContainer(initID0, 0)
|
||||
fakeRuntime.StopContainer(ctx, initID0, 0)
|
||||
// Sync again.
|
||||
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
|
||||
result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
expected = []*cRecord{
|
||||
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
|
||||
@ -808,11 +819,11 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
||||
|
||||
// 4. should restart the init container if needed to create a new podsandbox
|
||||
// Stop the pod sandbox.
|
||||
fakeRuntime.StopPodSandbox(sandboxID)
|
||||
fakeRuntime.StopPodSandbox(ctx, sandboxID)
|
||||
// Sync again.
|
||||
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
|
||||
result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
expected = []*cRecord{
|
||||
// The first init container instance is purged and no longer visible.
|
||||
@ -1541,6 +1552,7 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
assert.NoError(t, err)
|
||||
fakeRuntime.ErrorOnSandboxCreate = true
|
||||
@ -1569,9 +1581,9 @@ func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
|
||||
// GetPodStatus and the following SyncPod will not return errors in the
|
||||
// case where the pod has been deleted. We are not adding any pods into
|
||||
// the fakePodProvider so they are 'deleted'.
|
||||
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
|
||||
result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
|
||||
// This will return an error if the pod has _not_ been deleted.
|
||||
assert.NoError(t, result.Error())
|
||||
}
|
||||
|
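
Every runtime-manager call in the tests above now takes a context.Context as its first argument, with context.Background() supplied at the test boundary. A minimal Go sketch of that calling convention, using simplified stand-in types rather than the kubelet's real interfaces:

package main

import (
	"context"
	"fmt"
)

// podManager is a hypothetical stand-in for the runtime manager; only the
// ctx-first method shape mirrors the change in this commit.
type podManager struct{}

func (m *podManager) GetPodStatus(ctx context.Context, uid, name, namespace string) (string, error) {
	// A real implementation would forward ctx to CRI calls; here we only honor cancellation.
	if err := ctx.Err(); err != nil {
		return "", err
	}
	return "Running", nil
}

func (m *podManager) KillPod(ctx context.Context, uid string, gracePeriod *int64) error {
	return ctx.Err()
}

func main() {
	ctx := context.Background() // the tests create one context and reuse it for every call
	m := &podManager{}
	status, err := m.GetPodStatus(ctx, "uid-1", "foo", "default")
	fmt.Println(status, err)
	fmt.Println(m.KillPod(ctx, "uid-1", nil))
}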
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"net/url"
"runtime"
@ -35,7 +36,7 @@ import (
)

// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) {
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
@ -63,7 +64,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
}
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
@ -272,7 +273,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod)
}

// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) {
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeapi.PodSandboxFilter
if !all {
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
@ -283,7 +284,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
}
}

resp, err := m.runtimeService.ListPodSandbox(filter)
resp, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
klog.ErrorS(err, "Failed to list pod sandboxes")
return nil, err
@ -326,7 +327,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName

// getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeapi.PodSandboxFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
}
@ -335,7 +336,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
State: *state,
}
}
sandboxes, err := m.runtimeService.ListPodSandbox(filter)
sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
return nil, err
@ -356,8 +357,8 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
}

// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(podUID, nil)
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
}
@ -368,7 +369,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
PodSandboxId: sandboxIDs[0],
Port: ports,
}
resp, err := m.runtimeService.PortForward(req)
resp, err := m.runtimeService.PortForward(ctx, req)
if err != nil {
return nil, err
}
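
The sandbox helpers above do not create their own context; they simply forward the caller's ctx to the CRI runtime service. A rough sketch of that forwarding, with a simplified stand-in interface in place of the real CRI client:

package main

import (
	"context"
	"fmt"
)

// runtimeService mimics the ctx-first shape of the CRI client methods used above.
type runtimeService interface {
	RunPodSandbox(ctx context.Context, config string) (string, error)
	ListPodSandbox(ctx context.Context, filter string) ([]string, error)
}

type manager struct{ rs runtimeService }

// createSandbox forwards ctx straight to the runtime service, as createPodSandbox does.
func (m *manager) createSandbox(ctx context.Context, config string) (string, error) {
	return m.rs.RunPodSandbox(ctx, config)
}

// fakeRS is a trivial in-memory implementation for the example.
type fakeRS struct{}

func (fakeRS) RunPodSandbox(ctx context.Context, config string) (string, error) {
	return "sandbox-1", ctx.Err()
}

func (fakeRS) ListPodSandbox(ctx context.Context, filter string) ([]string, error) {
	return []string{"sandbox-1"}, ctx.Err()
}

func main() {
	m := &manager{rs: fakeRS{}}
	id, err := m.createSandbox(context.Background(), "cfg")
	fmt.Println(id, err)
}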
@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"os"
"path/filepath"
@ -35,6 +36,7 @@ import (

// TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
pod := newTestPod()
@ -46,10 +48,10 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Equal(t, os.FileMode(0755), perm)
return nil
}
id, _, err := m.createPodSandbox(pod, 1)
id, _, err := m.createPodSandbox(ctx, pod, 1)
assert.NoError(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id})
sandboxes, err := fakeRuntime.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{Id: id})
assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration
@ -100,6 +102,7 @@ func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) {

// TestCreatePodSandbox_RuntimeClass tests creating sandbox with RuntimeClasses enabled.
func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
ctx := context.Background()
rcm := runtimeclass.NewManager(rctest.NewPopulatedClient())
defer rctest.StartManagerSync(rcm)()

@ -122,7 +125,7 @@ func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
pod := newTestPod()
pod.Spec.RuntimeClassName = test.rcn

id, _, err := m.createPodSandbox(pod, 1)
id, _, err := m.createPodSandbox(ctx, pod, 1)
if test.expectError {
assert.Error(t, err)
} else {
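
The sandbox tests above simply pass context.Background(), but the new parameter is what makes it possible for callers to bound a sandbox operation. A short, hypothetical sketch (not what the tests do) of how a ctx-aware call can be given a deadline:

package main

import (
	"context"
	"fmt"
	"time"
)

// createPodSandbox is a stand-in for a ctx-aware sandbox call; the select makes it
// return early if the caller's context is cancelled or times out.
func createPodSandbox(ctx context.Context, pod string) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond): // pretend work
		return "sandbox-" + pod, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	id, err := createPodSandbox(ctx, "p1")
	fmt.Println(id, err)
}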
@ -419,8 +419,8 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r
}
}

func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
resp, err := r.ContainerStatus(id, false)
func isContainerRunning(ctx context.Context, id string, r internalapi.RuntimeService) (bool, error) {
resp, err := r.ContainerStatus(ctx, id, false)
if err != nil {
return false, err
}
@ -443,7 +443,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
// the error is error happens during waiting new logs.
func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) {
// no need to wait if the pod is not running
if running, err := isContainerRunning(id, runtimeService); !running {
if running, err := isContainerRunning(ctx, id, runtimeService); !running {
return false, false, err
}
errRetry := 5
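
waitLogs already received a ctx; the change above lets its status check share that same context. A self-contained sketch of the shape, with hypothetical helper names, showing how a ctx-aware poll loop both forwards the context and stops when it ends:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// isRunning is a stand-in for isContainerRunning: one status call, bounded by ctx.
func isRunning(ctx context.Context, id string) (bool, error) {
	if err := ctx.Err(); err != nil {
		return false, err
	}
	return id == "running", nil
}

// waitRunning polls until the container runs, the context ends, or retries run out.
func waitRunning(ctx context.Context, id string) error {
	for retries := 5; retries > 0; retries-- {
		ok, err := isRunning(ctx, id)
		if ok {
			return nil
		}
		if err != nil {
			return err
		}
		select {
		case <-time.After(100 * time.Millisecond):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return errors.New("container never started")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitRunning(ctx, "running"))
}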
@ -54,7 +54,7 @@ type handlerRunner struct {
}

type podStatusProvider interface {
GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
}

// NewHandlerRunner returns a configured lifecycle handler for a container.
@ -67,19 +67,19 @@ func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.C
}
}

func (hr *handlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) {
func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) {
switch {
case handler.Exec != nil:
var msg string
// TODO(tallclair): Pass a proper timeout value.
output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0)
output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0)
if err != nil {
msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output))
klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output))
}
return msg, err
case handler.HTTPGet != nil:
err := hr.runHTTPHandler(pod, container, handler, hr.eventRecorder)
err := hr.runHTTPHandler(ctx, pod, container, handler, hr.eventRecorder)
var msg string
if err != nil {
msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err)
@ -117,11 +117,11 @@ func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int
return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container)
}

func (hr *handlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error {
func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error {
host := handler.HTTPGet.Host
podIP := host
if len(host) == 0 {
status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil {
klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod))
return err
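
The lifecycle runner now threads the same context into both hook paths: the exec path hands it to the command runner, the HTTP path hands it to the pod-status lookup. A simplified sketch of that dispatch; the use of http.NewRequestWithContext is one standard-library way to honor the context on the HTTP side, not necessarily what the kubelet's HTTP doer does here:

package main

import (
	"context"
	"fmt"
	"net/http"
)

// runHook dispatches a lifecycle hook, forwarding ctx into whichever path runs.
func runHook(ctx context.Context, execCmd []string, httpURL string) error {
	switch {
	case len(execCmd) > 0:
		return runExec(ctx, execCmd)
	case httpURL != "":
		// Tie the HTTP call to the caller's context.
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil)
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		return resp.Body.Close()
	}
	return fmt.Errorf("no handler specified")
}

// runExec stands in for the CRI-backed command runner; it only honors cancellation here.
func runExec(ctx context.Context, cmd []string) error {
	return ctx.Err()
}

func main() {
	fmt.Println(runHook(context.Background(), []string{"sh", "-c", "true"}, ""))
}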
@ -17,6 +17,7 @@ limitations under the License.
package lifecycle

import (
"context"
"fmt"
"io"
"net"
@ -94,7 +95,7 @@ type fakeContainerCommandRunner struct {
Msg string
}

func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
func (f *fakeContainerCommandRunner) RunInContainer(_ context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
f.Cmd = cmd
f.ID = id
return []byte(f.Msg), f.Err
@ -113,11 +114,12 @@ func stubPodStatusProvider(podIP string) podStatusProvider {

type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)

func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
return f(uid, name, namespace)
}

func TestRunHandlerExec(t *testing.T) {
ctx := context.Background()
fakeCommandRunner := fakeContainerCommandRunner{}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)

@ -139,7 +141,7 @@ func TestRunHandlerExec(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -163,6 +165,7 @@ func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) {
}

func TestRunHandlerHttp(t *testing.T) {
ctx := context.Background()
fakeHTTPGetter := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@ -187,7 +190,7 @@ func TestRunHandlerHttp(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.ObjectMeta.UID = "foo-bar-quux"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if err != nil {
t.Errorf("unexpected error: %v", err)
@ -198,6 +201,7 @@ func TestRunHandlerHttp(t *testing.T) {
}

func TestRunHandlerHttpWithHeaders(t *testing.T) {
ctx := context.Background()
fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")

@ -225,7 +229,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if err != nil {
t.Errorf("unexpected error: %v", err)
@ -239,6 +243,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
}

func TestRunHandlerHttps(t *testing.T) {
ctx := context.Background()
fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@ -266,7 +271,7 @@ func TestRunHandlerHttps(t *testing.T) {
t.Run("consistent", func(t *testing.T) {
container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if err != nil {
t.Errorf("unexpected error: %v", err)
@ -280,7 +285,7 @@ func TestRunHandlerHttps(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if err != nil {
t.Errorf("unexpected error: %v", err)
@ -347,13 +352,14 @@ func TestRunHandlerHTTPPort(t *testing.T) {

for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, tt.FeatureGateEnabled)()
fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)

container.Lifecycle.PostStart.HTTPGet.Port = tt.Port
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if hasError := (err != nil); hasError != tt.ExpectError {
t.Errorf("unexpected error: %v", err)
@ -618,6 +624,7 @@ func TestRunHTTPHandler(t *testing.T) {

for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
fakePodStatusProvider := stubPodStatusProvider(tt.PodIP)

container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet
@ -627,7 +634,7 @@ func TestRunHTTPHandler(t *testing.T) {
fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)

_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Fatal(err)
}
@ -654,6 +661,7 @@ func TestRunHTTPHandler(t *testing.T) {
}

func TestRunHandlerNil(t *testing.T) {
ctx := context.Background()
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil)
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
podName := "podFoo"
@ -670,13 +678,14 @@ func TestRunHandlerNil(t *testing.T) {
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Namespace = podNamespace
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expect error, but got nil")
}
}

func TestRunHandlerExecFailure(t *testing.T) {
ctx := context.Background()
expectedErr := fmt.Errorf("invalid command")
fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
@ -701,7 +710,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error())
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expected error: %v", expectedErr)
}
@ -711,6 +720,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
}

func TestRunHandlerHttpFailure(t *testing.T) {
ctx := context.Background()
expectedErr := fmt.Errorf("fake http error")
expectedResp := http.Response{
Body: io.NopCloser(strings.NewReader(expectedErr.Error())),
@ -740,7 +750,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr)
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expected error: %v", expectedErr)
}
@ -753,6 +763,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
}

func TestRunHandlerHttpsFailureFallback(t *testing.T) {
ctx := context.Background()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, true)()

// Since prometheus' gatherer is global, other tests may have updated metrics already, so
@ -803,7 +814,7 @@ func TestRunHandlerHttpsFailureFallback(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)

if err != nil {
t.Errorf("unexpected error: %v", err)
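
The test doubles above adopt the new signatures by accepting a context and discarding it with the blank identifier. A tiny sketch of that convention with hypothetical names:

package main

import (
	"context"
	"fmt"
)

// commandRunner is the ctx-first shape the fakes above now implement.
type commandRunner interface {
	RunInContainer(ctx context.Context, id string, cmd []string) ([]byte, error)
}

// fakeRunner keeps its old behavior and simply ignores the context, mirroring
// the `_ context.Context` pattern used by the test doubles in this commit.
type fakeRunner struct{ out []byte }

func (f *fakeRunner) RunInContainer(_ context.Context, id string, cmd []string) ([]byte, error) {
	return f.out, nil
}

func main() {
	var r commandRunner = &fakeRunner{out: []byte("ok")}
	out, _ := r.RunInContainer(context.Background(), "abc1234", []string{"ls"})
	fmt.Println(string(out))
}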
@ -18,6 +18,7 @@ package logs

import (
"compress/gzip"
"context"
"fmt"
"io"
"os"
@ -58,7 +59,7 @@ type ContainerLogManager interface {
// Start container log manager.
Start()
// Clean removes all logs of specified container.
Clean(containerID string) error
Clean(ctx context.Context, containerID string) error
}

// LogRotatePolicy is a policy for container log rotation. The policy applies to all
@ -177,19 +178,20 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa

// Start the container log manager.
func (c *containerLogManager) Start() {
ctx := context.Background()
// Start a goroutine periodically does container log rotation.
go wait.Forever(func() {
if err := c.rotateLogs(); err != nil {
if err := c.rotateLogs(ctx); err != nil {
klog.ErrorS(err, "Failed to rotate container logs")
}
}, logMonitorPeriod)
}

// Clean removes all logs of specified container (including rotated one).
func (c *containerLogManager) Clean(containerID string) error {
func (c *containerLogManager) Clean(ctx context.Context, containerID string) error {
c.mutex.Lock()
defer c.mutex.Unlock()
resp, err := c.runtimeService.ContainerStatus(containerID, false)
resp, err := c.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
@ -211,11 +213,11 @@ func (c *containerLogManager) Clean(containerID string) error {
return nil
}

func (c *containerLogManager) rotateLogs() error {
func (c *containerLogManager) rotateLogs(ctx context.Context) error {
c.mutex.Lock()
defer c.mutex.Unlock()
// TODO(#59998): Use kubelet pod cache.
containers, err := c.runtimeService.ListContainers(&runtimeapi.ContainerFilter{})
containers, err := c.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{})
if err != nil {
return fmt.Errorf("failed to list containers: %v", err)
}
@ -228,7 +230,7 @@ func (c *containerLogManager) rotateLogs() error {
}
id := container.GetId()
// Note that we should not block log rotate for an error of a single container.
resp, err := c.runtimeService.ContainerStatus(id, false)
resp, err := c.runtimeService.ContainerStatus(ctx, id, false)
if err != nil {
klog.ErrorS(err, "Failed to get container status", "containerID", id)
continue
@ -247,7 +249,7 @@ func (c *containerLogManager) rotateLogs() error {
// In rotateLatestLog, there are several cases that we may
// lose original container log after ReopenContainerLog fails.
// We try to recover it by reopening container log.
if err := c.runtimeService.ReopenContainerLog(id); err != nil {
if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
klog.ErrorS(err, "Container log doesn't exist, reopen container log failed", "containerID", id, "path", path)
continue
}
@ -262,7 +264,7 @@ func (c *containerLogManager) rotateLogs() error {
continue
}
// Perform log rotation.
if err := c.rotateLog(id, path); err != nil {
if err := c.rotateLog(ctx, id, path); err != nil {
klog.ErrorS(err, "Failed to rotate log for container", "path", path, "containerID", id)
continue
}
@ -270,7 +272,7 @@ func (c *containerLogManager) rotateLogs() error {
return nil
}

func (c *containerLogManager) rotateLog(id, log string) error {
func (c *containerLogManager) rotateLog(ctx context.Context, id, log string) error {
// pattern is used to match all rotated files.
pattern := fmt.Sprintf("%s.*", log)
logs, err := filepath.Glob(pattern)
@ -298,7 +300,7 @@ func (c *containerLogManager) rotateLog(id, log string) error {
}
}

if err := c.rotateLatestLog(id, log); err != nil {
if err := c.rotateLatestLog(ctx, id, log); err != nil {
return fmt.Errorf("failed to rotate log %q: %v", log, err)
}

@ -410,13 +412,13 @@ func (c *containerLogManager) compressLog(log string) error {

// rotateLatestLog rotates latest log without compression, so that container can still write
// and fluentd can finish reading.
func (c *containerLogManager) rotateLatestLog(id, log string) error {
func (c *containerLogManager) rotateLatestLog(ctx context.Context, id, log string) error {
timestamp := c.clock.Now().Format(timestampFormat)
rotated := fmt.Sprintf("%s.%s", log, timestamp)
if err := c.osInterface.Rename(log, rotated); err != nil {
return fmt.Errorf("failed to rotate log %q to %q: %v", log, rotated, err)
}
if err := c.runtimeService.ReopenContainerLog(id); err != nil {
if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
// Rename the rotated log back, so that we can try rotating it again
// next round.
// If kubelet gets restarted at this point, we'll lose original log.
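
The log manager follows the boundary pattern used throughout this commit: the long-lived Start loop owns a background context and hands it to every rotation pass, which in turn forwards it to each CRI call. A simplified, self-contained sketch (a plain ticker stands in for wait.Forever):

package main

import (
	"context"
	"fmt"
	"time"
)

type logManager struct{}

// rotateLogs now needs a context so the underlying CRI calls can be cancelled.
func (m *logManager) rotateLogs(ctx context.Context) error {
	return ctx.Err()
}

// Start creates one background context for the periodic loop and reuses it per pass.
func (m *logManager) Start() {
	ctx := context.Background()
	go func() {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			if err := m.rotateLogs(ctx); err != nil {
				fmt.Println("failed to rotate container logs:", err)
			}
		}
	}()
}

func main() {
	(&logManager{}).Start()
	time.Sleep(50 * time.Millisecond) // give the goroutine a moment; a real caller blocks elsewhere
}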
@ -16,11 +16,13 @@ limitations under the License.

package logs

import "context"

type containerLogManagerStub struct{}

func (*containerLogManagerStub) Start() {}

func (*containerLogManagerStub) Clean(containerID string) error {
func (*containerLogManagerStub) Clean(ctx context.Context, containerID string) error {
return nil
}
@ -18,6 +18,7 @@ package logs

import (
"bytes"
"context"
"fmt"
"io"
"os"
@ -74,6 +75,7 @@ func TestGetAllLogs(t *testing.T) {
}

func TestRotateLogs(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-rotate-logs")
require.NoError(t, err)
defer os.RemoveAll(dir)
@ -147,7 +149,7 @@ func TestRotateLogs(t *testing.T) {
},
}
f.SetFakeContainers(testContainers)
require.NoError(t, c.rotateLogs())
require.NoError(t, c.rotateLogs(ctx))

timestamp := now.Format(timestampFormat)
logs, err := os.ReadDir(dir)
@ -161,6 +163,7 @@ func TestRotateLogs(t *testing.T) {
}

func TestClean(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-clean")
require.NoError(t, err)
defer os.RemoveAll(dir)
@ -219,7 +222,7 @@ func TestClean(t *testing.T) {
}
f.SetFakeContainers(testContainers)

err = c.Clean("container-3")
err = c.Clean(ctx, "container-3")
require.NoError(t, err)

logs, err := os.ReadDir(dir)
@ -350,6 +353,7 @@ func TestCompressLog(t *testing.T) {
}

func TestRotateLatestLog(t *testing.T) {
ctx := context.Background()
dir, err := os.MkdirTemp("", "test-rotate-latest-log")
require.NoError(t, err)
defer os.RemoveAll(dir)
@ -393,7 +397,7 @@ func TestRotateLatestLog(t *testing.T) {
defer testFile.Close()
testLog := testFile.Name()
rotatedLog := fmt.Sprintf("%s.%s", testLog, now.Format(timestampFormat))
err = c.rotateLatestLog("test-id", testLog)
err = c.rotateLatestLog(ctx, "test-id", testLog)
assert.Equal(t, test.expectError, err != nil)
_, err = os.Stat(testLog)
assert.Equal(t, test.expectOriginal, err == nil)
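
As in the other test files touched here, each test constructs one context.Background() and reuses it for every ctx-aware call. A compact illustration in Go's testing style, with a hypothetical helper in place of the real rotation code:

package logs_test

import (
	"context"
	"testing"
)

// rotate is a hypothetical stand-in for the ctx-aware helpers exercised above.
func rotate(ctx context.Context, containerID string) error {
	return ctx.Err()
}

func TestRotateWithContext(t *testing.T) {
	ctx := context.Background() // one context per test, reused for every call
	if err := rotate(ctx, "container-1"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}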
@ -17,6 +17,8 @@ limitations under the License.
package collectors

import (
"context"

"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@ -40,7 +42,7 @@ var (
type logMetricsCollector struct {
metrics.BaseStableCollector

podStats func() ([]statsapi.PodStats, error)
podStats func(ctx context.Context) ([]statsapi.PodStats, error)
}

// Check if logMetricsCollector implements necessary interface
@ -48,7 +50,7 @@ var _ metrics.StableCollector = &logMetricsCollector{}

// NewLogMetricsCollector implements the metrics.StableCollector interface and
// exposes metrics about container's log volume size.
func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) metrics.StableCollector {
func NewLogMetricsCollector(podStats func(ctx context.Context) ([]statsapi.PodStats, error)) metrics.StableCollector {
return &logMetricsCollector{
podStats: podStats,
}
@ -61,7 +63,7 @@ func (c *logMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {

// CollectWithStability implements the metrics.StableCollector interface.
func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
podStats, err := c.podStats()
podStats, err := c.podStats(context.Background())
if err != nil {
klog.ErrorS(err, "Failed to get pod stats")
return
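
The metrics collector interface carries no context of its own, so the collector stores a ctx-aware stats function and supplies context.Background() at collection time. A simplified sketch of that shape, without the real metrics machinery:

package main

import (
	"context"
	"fmt"
)

// statsFunc matches the ctx-aware field shape used by the collector above.
type statsFunc func(ctx context.Context) ([]string, error)

type logMetricsCollector struct {
	podStats statsFunc
}

// Collect has no context parameter (the metrics interface does not pass one),
// so a background context is created at this boundary, as in CollectWithStability.
func (c *logMetricsCollector) Collect() {
	stats, err := c.podStats(context.Background())
	if err != nil {
		fmt.Println("failed to get pod stats:", err)
		return
	}
	fmt.Println("collected", len(stats), "pod stats")
}

func main() {
	c := &logMetricsCollector{podStats: func(ctx context.Context) ([]string, error) {
		return []string{"pod-a", "pod-b"}, ctx.Err()
	}}
	c.Collect()
}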
@ -17,6 +17,7 @@ limitations under the License.
package collectors

import (
"context"
"strings"
"testing"

@ -29,7 +30,7 @@ func TestNoMetricsCollected(t *testing.T) {
descLogSize = descLogSize.GetRawDesc()

collector := &logMetricsCollector{
podStats: func() ([]statsapi.PodStats, error) {
podStats: func(_ context.Context) ([]statsapi.PodStats, error) {
return []statsapi.PodStats{}, nil
},
}
@ -45,7 +46,7 @@ func TestMetricsCollected(t *testing.T) {

size := uint64(18)
collector := &logMetricsCollector{
podStats: func() ([]statsapi.PodStats, error) {
podStats: func(_ context.Context) ([]statsapi.PodStats, error) {
return []statsapi.PodStats{
{
PodRef: statsapi.PodReference{
@ -17,6 +17,7 @@ limitations under the License.
package collectors

import (
"context"
"time"

"k8s.io/component-base/metrics"
@ -116,11 +117,12 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *metrics.Des
// leak metric collectors for containers or pods that no longer exist. Instead, implement
// custom collector in a way that only collects metrics for active containers.
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
ctx := context.Background()
var errorCount float64
defer func() {
ch <- metrics.NewLazyConstMetric(resourceScrapeResultDesc, metrics.GaugeValue, errorCount)
}()
statsSummary, err := rc.provider.GetCPUAndMemoryStats()
statsSummary, err := rc.provider.GetCPUAndMemoryStats(ctx)
if err != nil {
errorCount = 1
klog.ErrorS(err, "Error getting summary for resourceMetric prometheus endpoint")
@ -17,6 +17,7 @@ limitations under the License.
package collectors

import (
"context"
"fmt"
"strings"
"testing"
@ -357,8 +358,9 @@ func TestCollectResourceMetrics(t *testing.T) {
for _, test := range tests {
tc := test
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
provider := summaryprovidertest.NewMockSummaryProvider(mockCtrl)
provider.EXPECT().GetCPUAndMemoryStats().Return(tc.summary, tc.summaryErr).AnyTimes()
provider.EXPECT().GetCPUAndMemoryStats(ctx).Return(tc.summary, tc.summaryErr).AnyTimes()
collector := NewResourceMetricsCollector(provider)

if err := testutil.CustomCollectAndCompare(collector, strings.NewReader(tc.expectedMetrics), interestedMetrics...); err != nil {
@ -17,6 +17,8 @@ limitations under the License.
package collectors

import (
"context"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@ -96,7 +98,8 @@ func (collector *volumeStatsCollector) DescribeWithStability(ch chan<- *metrics.

// CollectWithStability implements the metrics.StableCollector interface.
func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) {
podStats, err := collector.statsProvider.ListPodStats()
ctx := context.Background()
podStats, err := collector.statsProvider.ListPodStats(ctx)
if err != nil {
return
}
@ -17,6 +17,7 @@ limitations under the License.
package collectors

import (
"context"
"strings"
"testing"

@ -32,6 +33,7 @@ func newUint64Pointer(i uint64) *uint64 {
}

func TestVolumeStatsCollector(t *testing.T) {
ctx := context.Background()
// Fixed metadata on type and help text. We prepend this to every expected
// output so we only have to modify a single place when doing adjustments.
const metadata = `
@ -144,14 +146,15 @@ func TestVolumeStatsCollector(t *testing.T) {
defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl)

mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes()
if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil {
t.Errorf("unexpected collecting result:\n%s", err)
}
}

func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) {
ctx := context.Background()
// Fixed metadata on type and help text. We prepend this to every expected
// output so we only have to modify a single place when doing adjustments.
const metadata = `
@ -231,8 +234,8 @@ func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) {
defer mockCtrl.Finish()
mockStatsProvider := statstest.NewMockProvider(mockCtrl)

mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes()
if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil {
t.Errorf("unexpected collecting result:\n%s", err)
}
@ -17,6 +17,7 @@ limitations under the License.
package nodestatus

import (
"context"
"fmt"
"math"
"net"
@ -54,7 +55,7 @@ const (

// Setter modifies the node in-place, and returns an error if the modification failed.
// Setters may partially mutate the node before returning an error.
type Setter func(node *v1.Node) error
type Setter func(ctx context.Context, node *v1.Node) error

// NodeAddress returns a Setter that updates address-related information on the node.
func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
@ -78,7 +79,7 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
}
secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified()

return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
if nodeIPSpecified {
if err := validateNodeIPFunc(nodeIP); err != nil {
return fmt.Errorf("failed to validate nodeIP: %v", err)
@ -250,7 +251,7 @@ func MachineInfo(nodeName string,
recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent
localStorageCapacityIsolation bool,
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
// Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised.
if node.Status.Capacity == nil {
@ -379,9 +380,9 @@ func MachineInfo(nodeName string,
// VersionInfo returns a Setter that updates version-related information on the node.
func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo
runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type
runtimeVersionFunc func() (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version
runtimeVersionFunc func(ctx context.Context) (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
verinfo, err := versionInfoFunc()
if err != nil {
return fmt.Errorf("error getting version info: %v", err)
@ -391,7 +392,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), //
node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion

runtimeVersion := "Unknown"
if runtimeVer, err := runtimeVersionFunc(); err == nil {
if runtimeVer, err := runtimeVersionFunc(ctx); err == nil {
runtimeVersion = runtimeVer.String()
}
node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion)
@ -405,7 +406,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), //

// DaemonEndpoints returns a Setter that updates the daemon endpoints on the node.
func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
node.Status.DaemonEndpoints = *daemonEndpoints
return nil
}
@ -417,7 +418,7 @@ func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
func Images(nodeStatusMaxImages int32,
imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
// Update image list of this node
var imagesOnNode []v1.ContainerImage
containerImages, err := imageListFunc()
@ -452,7 +453,7 @@ func Images(nodeStatusMaxImages int32,

// GoRuntime returns a Setter that sets GOOS and GOARCH on the node.
func GoRuntime() Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
node.Status.NodeInfo.Architecture = goruntime.GOARCH
return nil
@ -471,7 +472,7 @@ func ReadyCondition(
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
localStorageCapacityIsolation bool,
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
// This is due to an issue with version skewed kubelet and master components.
// ref: https://github.com/kubernetes/kubernetes/issues/16961
@ -556,7 +557,7 @@ func MemoryPressureCondition(nowFunc func() time.Time, // typically Kubelet.cloc
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition

@ -617,7 +618,7 @@ func PIDPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.N
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition

@ -678,7 +679,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition

@ -738,7 +739,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.
func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
// Make sure to only update node status after reconciler starts syncing up states
if syncedFunc() {
node.Status.VolumesInUse = volumesInUseFunc()
@ -750,7 +751,7 @@ func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.Rec
// VolumeLimits returns a Setter that updates the volume limits on the node.
func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits, // typically Kubelet.volumePluginMgr.ListVolumePluginWithLimits
) Setter {
return func(node *v1.Node) error {
return func(ctx context.Context, node *v1.Node) error {
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
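
The Setter type itself changes shape here: every node-status setter now receives a context first, even when it does not use it, so that setters which do call into the runtime (such as VersionInfo) can forward it. A minimal sketch of that signature and how a caller drives a list of setters with one context (simplified node type, hypothetical setter names):

package main

import (
	"context"
	"fmt"
)

type node struct{ os, arch string }

// setter mirrors the new Setter signature: context first, node to mutate second.
type setter func(ctx context.Context, n *node) error

// goRuntime ignores ctx; a setter that calls out to the container runtime would forward it.
func goRuntime() setter {
	return func(ctx context.Context, n *node) error {
		n.os, n.arch = "linux", "amd64"
		return nil
	}
}

func main() {
	ctx := context.Background()
	n := &node{}
	for _, s := range []setter{goRuntime()} {
		if err := s(ctx, n); err != nil {
			fmt.Println("setter failed:", err)
		}
	}
	fmt.Printf("%+v\n", *n)
}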
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package nodestatus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
@ -512,6 +513,7 @@ func TestNodeAddress(t *testing.T) {
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// testCase setup
|
||||
existingNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -553,7 +555,7 @@ func TestNodeAddress(t *testing.T) {
|
||||
nodeAddressesFunc)
|
||||
|
||||
// call setter on existing node
|
||||
err := setter(existingNode)
|
||||
err := setter(ctx, existingNode)
|
||||
if err != nil && !testCase.shouldError {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
} else if err != nil && testCase.shouldError {
|
||||
@ -598,6 +600,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// testCase setup
|
||||
existingNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
|
||||
@ -624,7 +627,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
|
||||
nodeAddressesFunc)
|
||||
|
||||
// call setter on existing node
|
||||
err := setter(existingNode)
|
||||
err := setter(ctx, existingNode)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
@ -1049,6 +1052,7 @@ func TestMachineInfo(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
machineInfoFunc := func() (*cadvisorapiv1.MachineInfo, error) {
|
||||
return tc.machineInfo, tc.machineInfoError
|
||||
}
|
||||
@ -1075,7 +1079,7 @@ func TestMachineInfo(t *testing.T) {
|
||||
setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc,
|
||||
devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc, tc.disableLocalStorageCapacityIsolation)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected node
|
||||
@ -1153,19 +1157,20 @@ func TestVersionInfo(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
versionInfoFunc := func() (*cadvisorapiv1.VersionInfo, error) {
|
||||
return tc.versionInfo, tc.versionInfoError
|
||||
}
|
||||
runtimeTypeFunc := func() string {
|
||||
return tc.runtimeType
|
||||
}
|
||||
runtimeVersionFunc := func() (kubecontainer.Version, error) {
|
||||
runtimeVersionFunc := func(_ context.Context) (kubecontainer.Version, error) {
|
||||
return tc.runtimeVersion, tc.runtimeVersionError
|
||||
}
|
||||
// construct setter
|
||||
setter := VersionInfo(versionInfoFunc, runtimeTypeFunc, runtimeVersionFunc)
|
||||
// call setter on node
|
||||
err := setter(tc.node)
|
||||
err := setter(ctx, tc.node)
|
||||
require.Equal(t, tc.expectError, err)
|
||||
// check expected node
|
||||
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.node),
|
||||
@ -1229,6 +1234,7 @@ func TestImages(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
imageListFunc := func() ([]kubecontainer.Image, error) {
|
||||
// today, imageListFunc is expected to return a sorted list,
|
||||
// but we may choose to sort in the setter at some future point
|
||||
@ -1240,7 +1246,7 @@ func TestImages(t *testing.T) {
|
||||
setter := Images(tc.maxImages, imageListFunc)
|
||||
// call setter on node
|
||||
node := &v1.Node{}
|
||||
err := setter(node)
|
||||
err := setter(ctx, node)
|
||||
require.Equal(t, tc.expectError, err)
|
||||
// check expected node, image list should be reset to empty when there is an error
|
||||
expectNode := &v1.Node{}
|
||||
@ -1408,6 +1414,7 @@ func TestReadyCondition(t *testing.T) {
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
runtimeErrorsFunc := func() error {
|
||||
return tc.runtimeErrors
|
||||
}
|
||||
@ -1433,7 +1440,7 @@ func TestReadyCondition(t *testing.T) {
|
||||
// construct setter
|
||||
setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected condition
|
||||
@ -1541,6 +1548,7 @@ func TestMemoryPressureCondition(t *testing.T) {
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
events := []testEvent{}
|
||||
recordEventFunc := func(eventType, event string) {
|
||||
events = append(events, testEvent{
|
||||
@ -1554,7 +1562,7 @@ func TestMemoryPressureCondition(t *testing.T) {
|
||||
// construct setter
|
||||
setter := MemoryPressureCondition(nowFunc, pressureFunc, recordEventFunc)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected condition
|
||||
@ -1662,6 +1670,7 @@ func TestPIDPressureCondition(t *testing.T) {
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
events := []testEvent{}
|
||||
recordEventFunc := func(eventType, event string) {
|
||||
events = append(events, testEvent{
|
||||
@ -1675,7 +1684,7 @@ func TestPIDPressureCondition(t *testing.T) {
|
||||
// construct setter
|
||||
setter := PIDPressureCondition(nowFunc, pressureFunc, recordEventFunc)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected condition
|
||||
@ -1783,6 +1792,7 @@ func TestDiskPressureCondition(t *testing.T) {
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
events := []testEvent{}
|
||||
recordEventFunc := func(eventType, event string) {
|
||||
events = append(events, testEvent{
|
||||
@ -1796,7 +1806,7 @@ func TestDiskPressureCondition(t *testing.T) {
|
||||
// construct setter
|
||||
setter := DiskPressureCondition(nowFunc, pressureFunc, recordEventFunc)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected condition
|
||||
@ -1843,6 +1853,7 @@ func TestVolumesInUse(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
syncedFunc := func() bool {
|
||||
return tc.synced
|
||||
}
|
||||
@ -1852,7 +1863,7 @@ func TestVolumesInUse(t *testing.T) {
|
||||
// construct setter
|
||||
setter := VolumesInUse(syncedFunc, volumesInUseFunc)
|
||||
// call setter on node
|
||||
if err := setter(tc.node); err != nil {
|
||||
if err := setter(ctx, tc.node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected volumes
|
||||
@ -1908,6 +1919,7 @@ func TestVolumeLimits(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
volumePluginListFunc := func() []volume.VolumePluginWithAttachLimits {
|
||||
return tc.volumePluginList
|
||||
}
|
||||
@ -1915,7 +1927,7 @@ func TestVolumeLimits(t *testing.T) {
|
||||
setter := VolumeLimits(volumePluginListFunc)
|
||||
// call setter on node
|
||||
node := &v1.Node{}
|
||||
if err := setter(node); err != nil {
|
||||
if err := setter(ctx, node); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// check expected node
|
||||
|
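The hunks above change the node-status setters to take a context as their first argument, which the tests now supply. A minimal, self-contained sketch of that pattern follows; Node, Setter, Images and the image lister here are simplified stand-ins for illustration, not the real kubelet/nodestatus API.

package main

import (
	"context"
	"fmt"
)

// Node is a stand-in for *v1.Node; Setter mirrors the updated signature,
// with the context first so runtime-backed setters can be cancelled.
type Node struct{ Images []string }
type Setter func(ctx context.Context, node *Node) error

// Images returns a Setter that fills the node's image list via the lister.
func Images(listImages func(ctx context.Context) ([]string, error)) Setter {
	return func(ctx context.Context, node *Node) error {
		imgs, err := listImages(ctx) // the caller's ctx reaches the runtime-backed call
		if err != nil {
			node.Images = nil // image list stays empty on error, as the test above expects
			return err
		}
		node.Images = imgs
		return nil
	}
}

func main() {
	ctx := context.Background()
	setter := Images(func(ctx context.Context) ([]string, error) {
		return []string{"registry.k8s.io/pause:3.9"}, nil
	})
	node := &Node{}
	if err := setter(ctx, node); err != nil {
		fmt.Println("setter error:", err)
		return
	}
	fmt.Println(node.Images)
}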
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package pleg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@ -188,6 +189,7 @@ func (g *GenericPLEG) updateRelistTime(timestamp time.Time) {
|
||||
// relist queries the container runtime for the list of pods/containers, compares them
|
||||
// with the internal pods/containers, and generates events accordingly.
|
||||
func (g *GenericPLEG) relist() {
|
||||
ctx := context.Background()
|
||||
klog.V(5).InfoS("GenericPLEG: Relisting")
|
||||
|
||||
if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
|
||||
@ -200,7 +202,7 @@ func (g *GenericPLEG) relist() {
|
||||
}()
|
||||
|
||||
// Get all the pods.
|
||||
podList, err := g.runtime.GetPods(true)
|
||||
podList, err := g.runtime.GetPods(ctx, true)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "GenericPLEG: Unable to retrieve pods")
|
||||
return
|
||||
@ -247,7 +249,7 @@ func (g *GenericPLEG) relist() {
|
||||
// inspecting the pod and getting the PodStatus to update the cache
|
||||
// serially may take a while. We should be aware of this and
|
||||
// parallelize if needed.
|
||||
if err := g.updateCache(pod, pid); err != nil {
|
||||
if err := g.updateCache(ctx, pod, pid); err != nil {
|
||||
// Rely on updateCache calling GetPodStatus to log the actual error.
|
||||
klog.V(4).ErrorS(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name))
|
||||
|
||||
@ -305,7 +307,7 @@ func (g *GenericPLEG) relist() {
|
||||
if len(g.podsToReinspect) > 0 {
|
||||
klog.V(5).InfoS("GenericPLEG: Reinspecting pods that previously failed inspection")
|
||||
for pid, pod := range g.podsToReinspect {
|
||||
if err := g.updateCache(pod, pid); err != nil {
|
||||
if err := g.updateCache(ctx, pod, pid); err != nil {
|
||||
// Rely on updateCache calling GetPodStatus to log the actual error.
|
||||
klog.V(5).ErrorS(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name))
|
||||
needsReinspection[pid] = pod
|
||||
@ -388,7 +390,7 @@ func (g *GenericPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus)
|
||||
return oldStatus.IPs
|
||||
}
|
||||
|
||||
func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
|
||||
func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, pid types.UID) error {
|
||||
if pod == nil {
|
||||
// The pod is missing in the current relist. This means that
|
||||
// the pod has no visible (active or inactive) containers.
|
||||
@ -400,7 +402,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
|
||||
// TODO: Consider adding a new runtime method
|
||||
// GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing
|
||||
// all containers again.
|
||||
status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
|
||||
status, err := g.runtime.GetPodStatus(ctx, pod.ID, pod.Name, pod.Namespace)
|
||||
if err != nil {
|
||||
// nolint:logcheck // Not using the result of klog.V inside the
|
||||
// if branch is okay, we just use it to determine whether the
|
||||
|
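relist now creates one context per pass and reuses it for GetPods and for every per-pod GetPodStatus call. A rough stdlib-only sketch of that shape, with Runtime reduced to a two-method stand-in for kubecontainer.Runtime:

package main

import (
	"context"
	"fmt"
)

// Runtime is a simplified stand-in for the container runtime interface:
// every query now takes a context as its first argument.
type Runtime interface {
	GetPods(ctx context.Context, all bool) ([]string, error)
	GetPodStatus(ctx context.Context, podID string) (string, error)
}

// relist mirrors the GenericPLEG shape above: one context per relist pass,
// shared by GetPods and each GetPodStatus call made during that pass.
func relist(rt Runtime) {
	// There is no caller-supplied context yet, so Background is used, as in the hunk.
	ctx := context.Background()
	pods, err := rt.GetPods(ctx, true)
	if err != nil {
		fmt.Println("unable to retrieve pods:", err)
		return
	}
	for _, pid := range pods {
		if _, err := rt.GetPodStatus(ctx, pid); err != nil {
			// A failed inspection would be retried on the next pass, as in the PLEG.
			fmt.Println("ignoring events for pod", pid, "err:", err)
		}
	}
}

type fakeRuntime struct{}

func (fakeRuntime) GetPods(_ context.Context, _ bool) ([]string, error) {
	return []string{"pod-a", "pod-b"}, nil
}

func (fakeRuntime) GetPodStatus(_ context.Context, _ string) (string, error) {
	return "Running", nil
}

func main() {
	relist(fakeRuntime{})
}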
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package pleg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@ -350,6 +351,7 @@ func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecont
|
||||
}
|
||||
|
||||
func TestRelistWithCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
runtimeMock := containertest.NewMockRuntime(mockCtrl)
|
||||
@ -358,11 +360,11 @@ func TestRelistWithCache(t *testing.T) {
|
||||
ch := pleg.Watch()
|
||||
|
||||
pods, statuses, events := createTestPodsStatusesAndEvents(2)
|
||||
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).AnyTimes()
|
||||
runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).AnyTimes()
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
|
||||
// Inject an error when querying runtime for the pod status for pods[1].
|
||||
statusErr := fmt.Errorf("unable to get status")
|
||||
runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1)
|
||||
|
||||
pleg.relist()
|
||||
actualEvents := getEventsFromChannel(ch)
|
||||
@ -384,7 +386,7 @@ func TestRelistWithCache(t *testing.T) {
|
||||
assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents)
|
||||
|
||||
// Return normal status for pods[1].
|
||||
runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(statuses[1], nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(statuses[1], nil).Times(1)
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
cases = []struct {
|
||||
@ -406,19 +408,20 @@ func TestRelistWithCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveCacheEntry(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
runtimeMock := containertest.NewMockRuntime(mockCtrl)
|
||||
pleg := newTestGenericPLEGWithRuntimeMock(runtimeMock)
|
||||
|
||||
pods, statuses, _ := createTestPodsStatusesAndEvents(1)
|
||||
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1)
|
||||
// Does a relist to populate the cache.
|
||||
pleg.relist()
|
||||
// Delete the pod from runtime. Verify that the cache entry has been
|
||||
// removed after relisting.
|
||||
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{}, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{}, nil).Times(1)
|
||||
pleg.relist()
|
||||
actualStatus, actualErr := pleg.cache.Get(pods[0].ID)
|
||||
assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus)
|
||||
@ -453,6 +456,7 @@ func TestHealthy(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRelistWithReinspection(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
runtimeMock := containertest.NewMockRuntime(mockCtrl)
|
||||
@ -467,13 +471,13 @@ func TestRelistWithReinspection(t *testing.T) {
|
||||
ID: podID,
|
||||
Containers: []*kubecontainer.Container{infraContainer},
|
||||
}}
|
||||
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
|
||||
|
||||
goodStatus := &kubecontainer.PodStatus{
|
||||
ID: podID,
|
||||
ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}},
|
||||
}
|
||||
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1)
|
||||
|
||||
goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID}
|
||||
|
||||
@ -492,13 +496,13 @@ func TestRelistWithReinspection(t *testing.T) {
|
||||
ID: podID,
|
||||
Containers: []*kubecontainer.Container{infraContainer, transientContainer},
|
||||
}}
|
||||
runtimeMock.EXPECT().GetPods(true).Return(podsWithTransientContainer, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return(podsWithTransientContainer, nil).Times(1)
|
||||
|
||||
badStatus := &kubecontainer.PodStatus{
|
||||
ID: podID,
|
||||
ContainerStatuses: []*kubecontainer.Status{},
|
||||
}
|
||||
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1)
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
@ -509,8 +513,8 @@ func TestRelistWithReinspection(t *testing.T) {
|
||||
|
||||
// listing 3 - pretend the transient container has now disappeared, leaving just the infra
|
||||
// container. Make sure the pod is reinspected for its status and the cache is updated.
|
||||
runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1)
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
@ -591,6 +595,7 @@ func TestRelistingWithSandboxes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRelistIPChange(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
testCases := []struct {
|
||||
name string
|
||||
podID string
|
||||
@ -631,8 +636,8 @@ func TestRelistIPChange(t *testing.T) {
|
||||
}
|
||||
event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
|
||||
|
||||
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1)
|
||||
|
||||
pleg.relist()
|
||||
actualEvents := getEventsFromChannel(ch)
|
||||
@ -652,8 +657,8 @@ func TestRelistIPChange(t *testing.T) {
|
||||
ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
|
||||
}
|
||||
event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
|
||||
runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1)
|
||||
runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1)
|
||||
|
||||
pleg.relist()
|
||||
actualEvents = getEventsFromChannel(ch)
|
||||
|
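These expectations name the exact ctx value, which matches only because both the test and relist use context.Background(). If the production code ever derived a per-call context (for example, with a timeout), a looser matcher would keep such tests from breaking; a hypothetical variant of the two expectations above:

// Matches any context value instead of the specific one created in the test.
runtimeMock.EXPECT().GetPods(gomock.Any(), true).Return(pods, nil).AnyTimes()
runtimeMock.EXPECT().GetPodStatus(gomock.Any(), pods[0].ID, "", "").Return(statuses[0], nil).Times(1)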
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package kubelet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@ -48,7 +49,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
|
||||
go wait.Until(func() {
|
||||
for {
|
||||
id := <-buffer
|
||||
if err := runtime.DeleteContainer(id); err != nil {
|
||||
if err := runtime.DeleteContainer(context.Background(), id); err != nil {
|
||||
klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err)
|
||||
}
|
||||
}
|
||||
|
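The deletor's worker goroutine has no request-scoped caller, so it reaches for context.Background() at the call site. If shutdown propagation were ever wanted, the same loop could hold a cancellable kubelet-lifetime context instead; a small stdlib sketch of that alternative, with the buffer and delete function reduced to stand-ins:

package main

import (
	"context"
	"fmt"
	"time"
)

// deleteLoop drains a buffer of container IDs, passing a long-lived parent
// context into each delete so the whole loop can be cancelled at shutdown.
func deleteLoop(ctx context.Context, buffer <-chan string, deleteContainer func(context.Context, string) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case id := <-buffer:
			if err := deleteContainer(ctx, id); err != nil {
				fmt.Println("DeleteContainer returned error", id, err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	buffer := make(chan string, 1)
	buffer <- "container-123"
	go deleteLoop(ctx, buffer, func(ctx context.Context, id string) error {
		fmt.Println("deleting", id)
		return nil
	})
	time.Sleep(50 * time.Millisecond)
	cancel() // stops the loop
	time.Sleep(10 * time.Millisecond)
}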
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package prober
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
@ -80,7 +81,7 @@ func (pb *prober) recordContainerEvent(pod *v1.Pod, container *v1.Container, eve
|
||||
}
|
||||
|
||||
// probe probes the container.
|
||||
func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
|
||||
func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
|
||||
var probeSpec *v1.Probe
|
||||
switch probeType {
|
||||
case readiness:
|
||||
@ -98,7 +99,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
|
||||
return results.Success, nil
|
||||
}
|
||||
|
||||
result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries)
|
||||
result, output, err := pb.runProbeWithRetries(ctx, probeType, probeSpec, pod, status, container, containerID, maxProbeRetries)
|
||||
if err != nil || (result != probe.Success && result != probe.Warning) {
|
||||
// Probe failed in one way or another.
|
||||
if err != nil {
|
||||
@ -121,12 +122,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
|
||||
|
||||
// runProbeWithRetries tries to probe the container in a finite loop; it returns the last result
|
||||
// if it never succeeds.
|
||||
func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
|
||||
func (pb *prober) runProbeWithRetries(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
|
||||
var err error
|
||||
var result probe.Result
|
||||
var output string
|
||||
for i := 0; i < retries; i++ {
|
||||
result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID)
|
||||
result, output, err = pb.runProbe(ctx, probeType, p, pod, status, container, containerID)
|
||||
if err == nil {
|
||||
return result, output, nil
|
||||
}
|
||||
@ -134,12 +135,12 @@ func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.
|
||||
return result, output, err
|
||||
}
|
||||
|
||||
func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
|
||||
func (pb *prober) runProbe(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
|
||||
timeout := time.Duration(p.TimeoutSeconds) * time.Second
|
||||
if p.Exec != nil {
|
||||
klog.V(4).InfoS("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command)
|
||||
command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
|
||||
return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout))
|
||||
return pb.exec.Probe(pb.newExecInContainer(ctx, container, containerID, command, timeout))
|
||||
}
|
||||
if p.HTTPGet != nil {
|
||||
req, err := httpprobe.NewRequestForHTTPGetAction(p.HTTPGet, &container, status.PodIP, "probe")
|
||||
@ -187,9 +188,9 @@ type execInContainer struct {
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
|
||||
func (pb *prober) newExecInContainer(ctx context.Context, container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
|
||||
return &execInContainer{run: func() ([]byte, error) {
|
||||
return pb.runner.RunInContainer(containerID, cmd, timeout)
|
||||
return pb.runner.RunInContainer(ctx, containerID, cmd, timeout)
|
||||
}}
|
||||
}
|
||||
|
||||
|
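newExecInContainer now closes over the probe's context, so that context must stay valid until the returned command is actually run by the exec prober. A compact stdlib sketch of that capture, with the runner and command reduced to stand-ins:

package main

import (
	"context"
	"fmt"
)

// runInContainer stands in for the container command runner; it takes the ctx
// that the probe closure captured when it was built.
func runInContainer(ctx context.Context, cmd []string) ([]byte, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // cancelled before the exec ran
	}
	return []byte("ok"), nil
}

// newExecInContainer mirrors the prober change: the ctx is bound into the
// closure now but only used when run() is invoked later.
func newExecInContainer(ctx context.Context, cmd []string) func() ([]byte, error) {
	return func() ([]byte, error) {
		return runInContainer(ctx, cmd)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	run := newExecInContainer(ctx, []string{"/bin/healthcheck"})

	out, err := run()
	fmt.Println(string(out), err) // succeeds while ctx is live

	cancel()
	out, err = run()
	fmt.Println(string(out), err) // the captured ctx is cancelled, so the exec is skipped
}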
@ -18,6 +18,7 @@ package prober
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@ -132,6 +133,7 @@ func TestGetTCPAddrParts(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProbe(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}
|
||||
|
||||
execProbe := &v1.Probe{
|
||||
@ -234,7 +236,7 @@ func TestProbe(t *testing.T) {
|
||||
prober.exec = fakeExecProber{test.execResult, nil}
|
||||
}
|
||||
|
||||
result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
|
||||
result, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
|
||||
if test.expectError && err == nil {
|
||||
t.Errorf("[%s] Expected probe error but no error was returned.", testID)
|
||||
}
|
||||
@ -248,7 +250,7 @@ func TestProbe(t *testing.T) {
|
||||
if len(test.expectCommand) > 0 {
|
||||
prober.exec = execprobe.New()
|
||||
prober.runner = &containertest.FakeContainerCommandRunner{}
|
||||
_, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
|
||||
_, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
|
||||
if err != nil {
|
||||
t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
|
||||
continue
|
||||
@ -262,6 +264,7 @@ func TestProbe(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewExecInContainer(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
limit := 1024
|
||||
tenKilobyte := strings.Repeat("logs-123", 128*10)
|
||||
|
||||
@ -303,7 +306,7 @@ func TestNewExecInContainer(t *testing.T) {
|
||||
container := v1.Container{}
|
||||
containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"}
|
||||
cmd := []string{"/foo", "bar"}
|
||||
exec := prober.newExecInContainer(container, containerID, cmd, 0)
|
||||
exec := prober.newExecInContainer(ctx, container, containerID, cmd, 0)
|
||||
|
||||
var dataBuffer bytes.Buffer
|
||||
writer := ioutils.LimitWriter(&dataBuffer, int64(limit))
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package prober
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
@ -148,6 +149,7 @@ func newWorker(
|
||||
|
||||
// run periodically probes the container.
|
||||
func (w *worker) run() {
|
||||
ctx := context.Background()
|
||||
probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
|
||||
|
||||
// If kubelet restarted the probes could be started in rapid succession.
|
||||
@ -175,7 +177,7 @@ func (w *worker) run() {
|
||||
}()
|
||||
|
||||
probeLoop:
|
||||
for w.doProbe() {
|
||||
for w.doProbe(ctx) {
|
||||
// Wait for next probe tick.
|
||||
select {
|
||||
case <-w.stopCh:
|
||||
@ -198,7 +200,7 @@ func (w *worker) stop() {
|
||||
|
||||
// doProbe probes the container once and records the result.
|
||||
// Returns whether the worker should continue.
|
||||
func (w *worker) doProbe() (keepGoing bool) {
|
||||
func (w *worker) doProbe(ctx context.Context) (keepGoing bool) {
|
||||
defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
|
||||
defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })
|
||||
|
||||
@ -284,7 +286,7 @@ func (w *worker) doProbe() (keepGoing bool) {
|
||||
}
|
||||
|
||||
// Note, exec probe does NOT have access to pod environment variables or downward API
|
||||
result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
|
||||
result, err := w.probeManager.prober.probe(ctx, w.probeType, w.pod, status, w.container, w.containerID)
|
||||
if err != nil {
|
||||
// Prober error, throw away the result.
|
||||
return true
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package prober
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@ -131,6 +132,7 @@ func TestDoProbe(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
ctx := context.Background()
|
||||
w := newTestWorker(m, probeType, test.probe)
|
||||
if test.podStatus != nil {
|
||||
m.statusManager.SetPodStatus(w.pod, *test.podStatus)
|
||||
@ -139,7 +141,7 @@ func TestDoProbe(t *testing.T) {
|
||||
now := metav1.Now()
|
||||
w.pod.ObjectMeta.DeletionTimestamp = &now
|
||||
}
|
||||
if c := w.doProbe(); c != test.expectContinue[probeType.String()] {
|
||||
if c := w.doProbe(ctx); c != test.expectContinue[probeType.String()] {
|
||||
t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue[probeType.String()], c)
|
||||
}
|
||||
result, ok := resultsManager(m, probeType).Get(testContainerID)
|
||||
@ -158,6 +160,7 @@ func TestDoProbe(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInitialDelay(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
|
||||
for _, probeType := range [...]probeType{liveness, readiness, startup} {
|
||||
@ -166,7 +169,7 @@ func TestInitialDelay(t *testing.T) {
|
||||
})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(probeType != startup))
|
||||
|
||||
expectContinue(t, w, w.doProbe(), "during initial delay")
|
||||
expectContinue(t, w, w.doProbe(ctx), "during initial delay")
|
||||
// Default value depends on probe, Success for liveness, Failure for readiness, Unknown for startup
|
||||
switch probeType {
|
||||
case liveness:
|
||||
@ -184,12 +187,13 @@ func TestInitialDelay(t *testing.T) {
|
||||
m.statusManager.SetPodStatus(w.pod, laterStatus)
|
||||
|
||||
// Second call should succeed (already waited).
|
||||
expectContinue(t, w, w.doProbe(), "after initial delay")
|
||||
expectContinue(t, w, w.doProbe(ctx), "after initial delay")
|
||||
expectResult(t, w, results.Success, "after initial delay")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFailureThreshold(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
|
||||
@ -200,7 +204,7 @@ func TestFailureThreshold(t *testing.T) {
|
||||
|
||||
for j := 0; j < 3; j++ {
|
||||
msg := fmt.Sprintf("%d success (%d)", j+1, i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
}
|
||||
|
||||
@ -210,20 +214,21 @@ func TestFailureThreshold(t *testing.T) {
|
||||
// Next 2 probes should still be "success".
|
||||
for j := 0; j < 2; j++ {
|
||||
msg := fmt.Sprintf("%d failing (%d)", j+1, i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
}
|
||||
|
||||
// Third & following fail.
|
||||
for j := 0; j < 3; j++ {
|
||||
msg := fmt.Sprintf("%d failure (%d)", j+3, i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSuccessThreshold(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
|
||||
@ -235,21 +240,21 @@ func TestSuccessThreshold(t *testing.T) {
|
||||
// Probe defaults to Failure.
|
||||
for j := 0; j < 2; j++ {
|
||||
msg := fmt.Sprintf("%d success (%d)", j+1, i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
}
|
||||
|
||||
// Continuing success!
|
||||
for j := 0; j < 3; j++ {
|
||||
msg := fmt.Sprintf("%d success (%d)", j+3, i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
}
|
||||
|
||||
// Prober flakes :(
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg := fmt.Sprintf("1 failure (%d)", i)
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
|
||||
// Back to success.
|
||||
@ -323,6 +328,7 @@ func resultsManager(m *manager, probeType probeType) results.Manager {
|
||||
}
|
||||
|
||||
func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
|
||||
for _, probeType := range [...]probeType{liveness, startup} {
|
||||
@ -333,7 +339,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
|
||||
// First probe should fail.
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg := "first probe"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
if !w.onHold {
|
||||
t.Errorf("Prober should be on hold due to %s check failure", probeType)
|
||||
@ -342,7 +348,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
|
||||
// failure because the worker is on hold and won't probe.
|
||||
m.prober.exec = fakeExecProber{probe.Success, nil}
|
||||
msg = "while on hold"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
if !w.onHold {
|
||||
t.Errorf("Prober should be on hold due to %s check failure", probeType)
|
||||
@ -352,7 +358,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
|
||||
status.ContainerStatuses[0].ContainerID = "test://newCont_ID"
|
||||
m.statusManager.SetPodStatus(w.pod, status)
|
||||
msg = "hold lifted"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
if w.onHold {
|
||||
t.Errorf("Prober should not be on hold anymore")
|
||||
@ -361,13 +367,14 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestResultRunOnLivenessCheckFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatus())
|
||||
|
||||
m.prober.exec = fakeExecProber{probe.Success, nil}
|
||||
msg := "initial probe success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
if w.resultRun != 1 {
|
||||
t.Errorf("Prober resultRun should be 1")
|
||||
@ -375,7 +382,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
|
||||
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "probe failure, result success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
if w.resultRun != 1 {
|
||||
t.Errorf("Prober resultRun should be 1")
|
||||
@ -383,7 +390,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
|
||||
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "2nd probe failure, result success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
if w.resultRun != 2 {
|
||||
t.Errorf("Prober resultRun should be 2")
|
||||
@ -394,7 +401,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
|
||||
// also gets FailureThreshold attempts to succeed.
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "3rd probe failure, result failure"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
if w.resultRun != 0 {
|
||||
t.Errorf("Prober resultRun should be reset to 0")
|
||||
@ -402,6 +409,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestResultRunOnStartupCheckFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
|
||||
@ -410,7 +418,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
|
||||
// which is failed for startup at first.
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg := "probe failure, result unknown"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Unknown, msg)
|
||||
if w.resultRun != 1 {
|
||||
t.Errorf("Prober resultRun should be 1")
|
||||
@ -418,7 +426,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
|
||||
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "2nd probe failure, result unknown"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Unknown, msg)
|
||||
if w.resultRun != 2 {
|
||||
t.Errorf("Prober resultRun should be 2")
|
||||
@ -429,7 +437,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
|
||||
// also gets FailureThreshold attempts to succeed.
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "3rd probe failure, result failure"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
if w.resultRun != 0 {
|
||||
t.Errorf("Prober resultRun should be reset to 0")
|
||||
@ -437,43 +445,45 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLivenessProbeDisabledByStarted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
|
||||
// livenessProbe fails, but is disabled
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg := "Not started, probe failure, result success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
// setting started state
|
||||
m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true)
|
||||
// livenessProbe fails
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "Started, probe failure, result failure"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Failure, msg)
|
||||
}
|
||||
|
||||
func TestStartupProbeDisabledByStarted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m := newTestManager()
|
||||
w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 2})
|
||||
m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false))
|
||||
// startupProbe fails < FailureThreshold, stays unknown
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg := "Not started, probe failure, result unknown"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Unknown, msg)
|
||||
// startupProbe succeeds
|
||||
m.prober.exec = fakeExecProber{probe.Success, nil}
|
||||
msg = "Started, probe success, result success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
// setting started state
|
||||
m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true)
|
||||
// startupProbe fails, but is disabled
|
||||
m.prober.exec = fakeExecProber{probe.Failure, nil}
|
||||
msg = "Started, probe failure, result success"
|
||||
expectContinue(t, w, w.doProbe(), msg)
|
||||
expectContinue(t, w, w.doProbe(ctx), msg)
|
||||
expectResult(t, w, results.Success, msg)
|
||||
}
|
||||
|
||||
|
@ -44,6 +44,7 @@ type RunPodResult struct {
|
||||
|
||||
// RunOnce polls from one configuration update and runs the associated pods.
|
||||
func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) {
|
||||
ctx := context.Background()
|
||||
// Setup filesystem directories.
|
||||
if err := kl.setupDataDirs(); err != nil {
|
||||
return nil, err
|
||||
@ -59,7 +60,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
|
||||
select {
|
||||
case u := <-updates:
|
||||
klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods))
|
||||
result, err := kl.runOnce(u.Pods, runOnceRetryDelay)
|
||||
result, err := kl.runOnce(ctx, u.Pods, runOnceRetryDelay)
|
||||
klog.InfoS("Finished processing pods", "numPods", len(u.Pods))
|
||||
return result, err
|
||||
case <-time.After(runOnceManifestDelay):
|
||||
@ -68,7 +69,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
|
||||
}
|
||||
|
||||
// runOnce runs a given set of pods and returns their status.
|
||||
func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
|
||||
func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
|
||||
ch := make(chan RunPodResult)
|
||||
admitted := []*v1.Pod{}
|
||||
for _, pod := range pods {
|
||||
@ -81,7 +82,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
|
||||
|
||||
admitted = append(admitted, pod)
|
||||
go func(pod *v1.Pod) {
|
||||
err := kl.runPod(pod, retryDelay)
|
||||
err := kl.runPod(ctx, pod, retryDelay)
|
||||
ch <- RunPodResult{pod, err}
|
||||
}(pod)
|
||||
}
|
||||
@ -92,7 +93,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
|
||||
res := <-ch
|
||||
results = append(results, res)
|
||||
if res.Err != nil {
|
||||
failedContainerName, err := kl.getFailedContainers(res.Pod)
|
||||
failedContainerName, err := kl.getFailedContainers(ctx, res.Pod)
|
||||
if err != nil {
|
||||
klog.InfoS("Unable to get failed containers' names for pod", "pod", klog.KObj(res.Pod), "err", err)
|
||||
} else {
|
||||
@ -111,12 +112,12 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
|
||||
}
|
||||
|
||||
// runPod runs a single pod and waits until all containers are running.
|
||||
func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
|
||||
func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Duration) error {
|
||||
var isTerminal bool
|
||||
delay := retryDelay
|
||||
retry := 0
|
||||
for !isTerminal {
|
||||
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||
status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
|
||||
}
|
||||
@ -132,7 +133,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
|
||||
klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod))
|
||||
}
|
||||
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
|
||||
if isTerminal, err = kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil {
|
||||
if isTerminal, err = kl.syncPod(ctx, kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil {
|
||||
return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err)
|
||||
}
|
||||
if retry >= runOnceMaxRetries {
|
||||
@ -160,8 +161,8 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo
|
||||
}
|
||||
|
||||
// getFailedContainers returns the names of the failed containers for the pod.
|
||||
func (kl *Kubelet) getFailedContainers(pod *v1.Pod) ([]string, error) {
|
||||
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||
func (kl *Kubelet) getFailedContainers(ctx context.Context, pod *v1.Pod) ([]string, error) {
|
||||
status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
|
||||
}
|
||||
|
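RunOnce now mints a single root context and threads it through runOnce, the per-pod goroutines, and down to syncPod and getFailedContainers, instead of each layer calling context.Background() itself. A stripped-down sketch of that fan-out, with a pod reduced to a string ID:

package main

import (
	"context"
	"fmt"
	"sync"
)

// runPod is a stand-in for the per-pod work; it receives the root ctx.
func runPod(ctx context.Context, pod string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Println("synced", pod)
	return nil
}

// runOnce fans pods out to goroutines, passing the same ctx to each,
// as the updated kubelet code does.
func runOnce(ctx context.Context, pods []string) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod string) {
			defer wg.Done()
			if err := runPod(ctx, pod); err != nil {
				fmt.Println("pod failed:", pod, err)
			}
		}(pod)
	}
	wg.Wait()
}

func main() {
	ctx := context.Background() // the single root context, created once at the top
	runOnce(ctx, []string{"static-pod-a", "static-pod-b"})
}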
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package kubelet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
@ -53,6 +54,7 @@ import (
|
||||
)
|
||||
|
||||
func TestRunOnce(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
@ -170,7 +172,7 @@ func TestRunOnce(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
results, err := kb.runOnce(pods, time.Millisecond)
|
||||
results, err := kb.runOnce(ctx, pods, time.Millisecond)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
@ -239,17 +239,17 @@ type HostInterface interface {
|
||||
stats.Provider
|
||||
GetVersionInfo() (*cadvisorapi.VersionInfo, error)
|
||||
GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error)
|
||||
GetRunningPods() ([]*v1.Pod, error)
|
||||
RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
|
||||
CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error
|
||||
GetRunningPods(ctx context.Context) ([]*v1.Pod, error)
|
||||
RunInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string) ([]byte, error)
|
||||
CheckpointContainer(ctx context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error
|
||||
GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error
|
||||
ServeLogs(w http.ResponseWriter, req *http.Request)
|
||||
ResyncInterval() time.Duration
|
||||
GetHostname() string
|
||||
LatestLoopEntryTime() time.Time
|
||||
GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error)
|
||||
GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error)
|
||||
}
|
||||
|
||||
// NewServer initializes and configures a kubelet.Server object to handle HTTP requests.
|
||||
@ -740,7 +740,8 @@ func (s *Server) getPods(request *restful.Request, response *restful.Response) {
|
||||
// provided by the container runtime, and is different from the list returned
|
||||
// by getPods, which is a set of desired pods to run.
|
||||
func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) {
|
||||
pods, err := s.host.GetRunningPods()
|
||||
ctx := request.Request.Context()
|
||||
pods, err := s.host.GetRunningPods(ctx)
|
||||
if err != nil {
|
||||
response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
@ -820,7 +821,7 @@ func (s *Server) getAttach(request *restful.Request, response *restful.Response)
|
||||
}
|
||||
|
||||
podFullName := kubecontainer.GetPodFullName(pod)
|
||||
url, err := s.host.GetAttach(podFullName, params.podUID, params.containerName, *streamOpts)
|
||||
url, err := s.host.GetAttach(request.Request.Context(), podFullName, params.podUID, params.containerName, *streamOpts)
|
||||
if err != nil {
|
||||
streaming.WriteError(err, response.ResponseWriter)
|
||||
return
|
||||
@ -845,7 +846,7 @@ func (s *Server) getExec(request *restful.Request, response *restful.Response) {
|
||||
}
|
||||
|
||||
podFullName := kubecontainer.GetPodFullName(pod)
|
||||
url, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, *streamOpts)
|
||||
url, err := s.host.GetExec(request.Request.Context(), podFullName, params.podUID, params.containerName, params.cmd, *streamOpts)
|
||||
if err != nil {
|
||||
streaming.WriteError(err, response.ResponseWriter)
|
||||
return
|
||||
@ -864,7 +865,7 @@ func (s *Server) getRun(request *restful.Request, response *restful.Response) {
|
||||
|
||||
// For legacy reasons, run uses different query param than exec.
|
||||
params.cmd = strings.Split(request.QueryParameter("cmd"), " ")
|
||||
data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd)
|
||||
data, err := s.host.RunInContainer(request.Request.Context(), kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd)
|
||||
if err != nil {
|
||||
response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
@ -907,7 +908,7 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp
|
||||
return
|
||||
}
|
||||
|
||||
url, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID, *portForwardOptions)
|
||||
url, err := s.host.GetPortForward(request.Request.Context(), pod.Name, pod.Namespace, pod.UID, *portForwardOptions)
|
||||
if err != nil {
|
||||
streaming.WriteError(err, response.ResponseWriter)
|
||||
return
|
||||
@ -919,6 +920,7 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp
|
||||
// podNamespace, pod and container actually exist and only then calls out
|
||||
// to the runtime to actually checkpoint the container.
|
||||
func (s *Server) checkpoint(request *restful.Request, response *restful.Response) {
|
||||
ctx := request.Request.Context()
|
||||
pod, ok := s.host.GetPodByName(request.PathParameter("podNamespace"), request.PathParameter("podID"))
|
||||
if !ok {
|
||||
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
|
||||
@ -973,7 +975,7 @@ func (s *Server) checkpoint(request *restful.Request, response *restful.Response
|
||||
options.Timeout = timeout
|
||||
}
|
||||
|
||||
if err := s.host.CheckpointContainer(pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil {
|
||||
if err := s.host.CheckpointContainer(ctx, pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil {
|
||||
response.WriteError(
|
||||
http.StatusInternalServerError,
|
||||
fmt.Errorf(
|
||||
|
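The server handlers switch to request.Request.Context(), so a runtime call started for an HTTP request is abandoned when the client disconnects or the request is cancelled. A self-contained net/http sketch of the same idea; slowRuntimeCall and the port are illustrative only:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// slowRuntimeCall stands in for a CRI-backed call; it honours cancellation.
func slowRuntimeCall(ctx context.Context) (string, error) {
	select {
	case <-time.After(2 * time.Second):
		return "pods: []", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func runningPodsHandler(w http.ResponseWriter, req *http.Request) {
	// Use the request's context, mirroring s.host.GetRunningPods(ctx) above:
	// if the client goes away, the runtime call is abandoned too.
	body, err := slowRuntimeCall(req.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, body)
}

func main() {
	http.HandleFunc("/runningpods/", runningPodsHandler)
	_ = http.ListenAndServe("127.0.0.1:10255", nil)
}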
@ -74,11 +74,11 @@ const (
|
||||
|
||||
type fakeKubelet struct {
|
||||
podByNameFunc func(namespace, name string) (*v1.Pod, bool)
|
||||
containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
containerInfoFunc func(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error)
|
||||
machineInfoFunc func() (*cadvisorapi.MachineInfo, error)
|
||||
podsFunc func() []*v1.Pod
|
||||
runningPodsFunc func() ([]*v1.Pod, error)
|
||||
runningPodsFunc func(ctx context.Context) ([]*v1.Pod, error)
|
||||
logFunc func(w http.ResponseWriter, req *http.Request)
|
||||
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
|
||||
getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options)
|
||||
@ -109,8 +109,8 @@ func (fk *fakeKubelet) GetRequestedContainersInfo(containerName string, options
|
||||
return map[string]*cadvisorapi.ContainerInfo{}, nil
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
|
||||
return fk.containerInfoFunc(podFullName, uid, containerName, req)
|
||||
func (fk *fakeKubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
|
||||
return fk.containerInfoFunc(ctx, podFullName, uid, containerName, req)
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
|
||||
@ -129,8 +129,8 @@ func (fk *fakeKubelet) GetPods() []*v1.Pod {
|
||||
return fk.podsFunc()
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetRunningPods() ([]*v1.Pod, error) {
|
||||
return fk.runningPodsFunc()
|
||||
func (fk *fakeKubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
|
||||
return fk.runningPodsFunc(ctx)
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
|
||||
@ -145,11 +145,11 @@ func (fk *fakeKubelet) GetHostname() string {
|
||||
return fk.hostnameFunc()
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
|
||||
func (fk *fakeKubelet) RunInContainer(_ context.Context, podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
|
||||
return fk.runFunc(podFullName, uid, containerName, cmd)
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error {
|
||||
func (fk *fakeKubelet) CheckpointContainer(_ context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error {
|
||||
if containerName == "checkpointingFailure" {
|
||||
return fmt.Errorf("Returning error for test")
|
||||
}
|
||||
@ -162,15 +162,15 @@ type fakeRuntime struct {
|
||||
portForwardFunc func(string, int32, io.ReadWriteCloser) error
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize)
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize)
|
||||
}
|
||||
|
||||
func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
return f.portForwardFunc(podSandboxID, port, stream)
|
||||
}
|
||||
|
||||
@ -209,7 +209,7 @@ func newTestStreamingServer(streamIdleTimeout time.Duration) (s *testStreamingSe
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
|
||||
func (fk *fakeKubelet) GetExec(_ context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
|
||||
if fk.getExecCheck != nil {
|
||||
fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts)
|
||||
}
|
||||
@ -228,7 +228,7 @@ func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerNa
|
||||
return url.Parse(resp.GetUrl())
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
|
||||
func (fk *fakeKubelet) GetAttach(_ context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
|
||||
if fk.getAttachCheck != nil {
|
||||
fk.getAttachCheck(podFullName, podUID, containerName, streamOpts)
|
||||
}
|
||||
@ -246,7 +246,7 @@ func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, container
|
||||
return url.Parse(resp.GetUrl())
|
||||
}
|
||||
|
||||
func (fk *fakeKubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
|
||||
func (fk *fakeKubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
|
||||
if fk.getPortForwardCheck != nil {
|
||||
fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts)
|
||||
}
|
||||
@ -272,14 +272,16 @@ func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo
|
||||
func (*fakeKubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) {
|
||||
return map[string]volume.BlockVolume{}, true
|
||||
}
|
||||
func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) {
|
||||
func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(_ context.Context) ([]statsapi.PodStats, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (*fakeKubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (*fakeKubelet) ImageFsStats(_ context.Context) (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil }
|
||||
func (*fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ limitations under the License.
|
||||
package stats
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
@ -26,7 +27,7 @@ import (
|
||||
cadvisorv2 "github.com/google/cadvisor/info/v2"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
@ -39,18 +40,18 @@ type Provider interface {
|
||||
// The following stats are provided by either CRI or cAdvisor.
|
||||
//
|
||||
// ListPodStats returns the stats of all the containers managed by pods.
|
||||
ListPodStats() ([]statsapi.PodStats, error)
|
||||
ListPodStats(ctx context.Context) ([]statsapi.PodStats, error)
|
||||
// ListPodCPUAndMemoryStats returns the CPU and memory stats of all the
|
||||
// pod-managed containers.
|
||||
ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error)
|
||||
ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error)
|
||||
// ListPodStatsAndUpdateCPUNanoCoreUsage returns the stats of all the
|
||||
// containers managed by pods and force-updates the cpu usageNanoCores.
|
||||
// This is a workaround for CRI runtimes that do not integrate with
|
||||
// cadvisor. See https://github.com/kubernetes/kubernetes/issues/72788
|
||||
// for more details.
|
||||
ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error)
|
||||
ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error)
|
||||
// ImageFsStats returns the stats of the image filesystem.
|
||||
ImageFsStats() (*statsapi.FsStats, error)
|
||||
ImageFsStats(ctx context.Context) (*statsapi.FsStats, error)
|
||||
|
||||
// The following stats are provided by cAdvisor.
|
||||
//
|
||||
@ -67,7 +68,7 @@ type Provider interface {
|
||||
//
|
||||
// GetContainerInfo returns the information of the container with the
|
||||
// containerName managed by the pod with the uid.
|
||||
GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
// GetRawContainerInfo returns the information of the container with the
|
||||
// containerName. If subcontainers is true, this function will return the
|
||||
// information of all the sub-containers as well.
|
||||
@ -140,6 +141,7 @@ func CreateHandlers(rootPath string, provider Provider, summaryProvider SummaryP
|
||||
// Handles stats summary requests to /stats/summary
|
||||
// If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response.
|
||||
func (h *handler) handleSummary(request *restful.Request, response *restful.Response) {
|
||||
ctx := request.Request.Context()
|
||||
onlyCPUAndMemory := false
|
||||
err := request.Request.ParseForm()
|
||||
if err != nil {
|
||||
@ -152,11 +154,11 @@ func (h *handler) handleSummary(request *restful.Request, response *restful.Resp
|
||||
}
|
||||
var summary *statsapi.Summary
|
||||
if onlyCPUAndMemory {
|
||||
summary, err = h.summaryProvider.GetCPUAndMemoryStats()
|
||||
summary, err = h.summaryProvider.GetCPUAndMemoryStats(ctx)
|
||||
} else {
|
||||
// external calls to the summary API use cached stats
|
||||
forceStatsUpdate := false
|
||||
summary, err = h.summaryProvider.Get(forceStatsUpdate)
|
||||
summary, err = h.summaryProvider.Get(ctx, forceStatsUpdate)
|
||||
}
|
||||
if err != nil {
|
||||
handleError(response, "/stats/summary", err)
|
||||
|
@ -18,6 +18,7 @@ limitations under the License.
package stats

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
@ -31,9 +32,9 @@ import (
type SummaryProvider interface {
	// Get provides a new Summary with the stats from Kubelet,
	// and will update some stats if updateStats is true
	Get(updateStats bool) (*statsapi.Summary, error)
	Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error)
	// GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet,
	GetCPUAndMemoryStats() (*statsapi.Summary, error)
	GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error)
}

// summaryProviderImpl implements the SummaryProvider interface.
@ -65,7 +66,7 @@ func NewSummaryProvider(statsProvider Provider) SummaryProvider {
	}
}

func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) {
func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
	// TODO(timstclair): Consider returning a best-effort response if any of
	// the following errors occur.
	node, err := sp.provider.GetNode()
@ -81,15 +82,15 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
	if err != nil {
		return nil, fmt.Errorf("failed to get rootFs stats: %v", err)
	}
	imageFsStats, err := sp.provider.ImageFsStats()
	imageFsStats, err := sp.provider.ImageFsStats(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get imageFs stats: %v", err)
	}
	var podStats []statsapi.PodStats
	if updateStats {
		podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage()
		podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
	} else {
		podStats, err = sp.provider.ListPodStats()
		podStats, err = sp.provider.ListPodStats(ctx)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to list pod stats: %v", err)
@ -118,7 +119,7 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
	return &summary, nil
}

func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error) {
func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
	// TODO(timstclair): Consider returning a best-effort response if any of
	// the following errors occur.
	node, err := sp.provider.GetNode()
@ -131,7 +132,7 @@ func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error)
		return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
	}

	podStats, err := sp.provider.ListPodCPUAndMemoryStats()
	podStats, err := sp.provider.ListPodCPUAndMemoryStats(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list pod stats: %v", err)
	}

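Because SummaryProvider is an exported interface, every other implementation, including hand-written fakes used in tests, must adopt the context-first signatures to keep satisfying it. A minimal fake as a sketch (the statsapi import path and the type name are assumptions made for illustration):

package example

import (
	"context"

	statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" // assumed import path for the Summary type
)

// fakeSummaryProvider returns a canned summary; a real implementation is
// expected to forward ctx to the stats sources it queries, as the diff does.
type fakeSummaryProvider struct {
	summary *statsapi.Summary
	err     error
}

func (f *fakeSummaryProvider) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
	return f.summary, f.err
}

func (f *fakeSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
	return f.summary, f.err
}
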
@ -20,6 +20,7 @@ limitations under the License.
package stats

import (
	"context"
	"testing"
	"time"

@ -48,6 +49,7 @@ var (
)

func TestSummaryProviderGetStats(t *testing.T) {
	ctx := context.Background()
	assert := assert.New(t)

	podStats := []statsapi.PodStats{
@ -77,9 +79,9 @@ func TestSummaryProviderGetStats(t *testing.T) {
	mockStatsProvider.EXPECT().GetNode().Return(node, nil)
	mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig)
	mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot)
	mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes()
	mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil)
	mockStatsProvider.EXPECT().ImageFsStats().Return(imageFsStats, nil)
	mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes()
	mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil)
	mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(imageFsStats, nil)
	mockStatsProvider.EXPECT().RootFsStats().Return(rootFsStats, nil)
	mockStatsProvider.EXPECT().RlimitStats().Return(rlimitStats, nil)
	mockStatsProvider.EXPECT().GetCgroupStats("/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil)
@ -91,7 +93,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
	kubeletCreationTime := metav1.Now()
	systemBootTime := metav1.Now()
	provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider}
	summary, err := provider.Get(true)
	summary, err := provider.Get(ctx, true)
	assert.NoError(err)

	assert.Equal(summary.Node.NodeName, "test-node")
@ -139,6 +141,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
}

func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
	ctx := context.Background()
	assert := assert.New(t)

	podStats := []statsapi.PodStats{
@ -165,7 +168,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
	mockStatsProvider.EXPECT().GetNode().Return(node, nil)
	mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig)
	mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot)
	mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats().Return(podStats, nil)
	mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats(ctx).Return(podStats, nil)
	mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/", false).Return(cgroupStatsMap["/"].cs, nil)
	mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/runtime", false).Return(cgroupStatsMap["/runtime"].cs, nil)
	mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/misc", false).Return(cgroupStatsMap["/misc"].cs, nil)
@ -173,7 +176,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
	mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/kubepods", false).Return(cgroupStatsMap["/pods"].cs, nil)

	provider := NewSummaryProvider(mockStatsProvider)
	summary, err := provider.GetCPUAndMemoryStats()
	summary, err := provider.GetCPUAndMemoryStats(ctx)
	assert.NoError(err)

	assert.Equal(summary.Node.NodeName, "test-node")

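The updated tests pass the exact ctx value they create to each gomock expectation, which matches because summaryProviderImpl forwards that same context unchanged. If the code under test ever derived a child context, for example with context.WithTimeout, an exact-value expectation would stop matching; gomock's Any matcher keeps such tests stable. A sketch of that variant, with the mock package's import path assumed:

package stats

import (
	"context"
	"testing"
	"time"

	"github.com/golang/mock/gomock"

	statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing" // assumed import path for the generated mocks
)

func TestListPodStatsWithAnyContext(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockStatsProvider := statstest.NewMockProvider(ctrl)
	// gomock.Any() matches whichever context the caller passes down,
	// including contexts derived from the one created in the test.
	mockStatsProvider.EXPECT().ListPodStats(gomock.Any()).Return(nil, nil)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if _, err := mockStatsProvider.ListPodStats(ctx); err != nil {
		t.Fatalf("ListPodStats returned unexpected error: %v", err)
	}
}
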
@ -21,6 +21,7 @@ limitations under the License.
package testing

import (
	context "context"
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
@ -88,18 +89,18 @@ func (mr *MockProviderMockRecorder) GetCgroupStats(cgroupName, updateStats inter
}

// GetContainerInfo mocks base method.
func (m *MockProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
func (m *MockProvider) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetContainerInfo", podFullName, uid, containerName, req)
	ret := m.ctrl.Call(m, "GetContainerInfo", ctx, podFullName, uid, containerName, req)
	ret0, _ := ret[0].(*v1.ContainerInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetContainerInfo indicates an expected call of GetContainerInfo.
func (mr *MockProviderMockRecorder) GetContainerInfo(podFullName, uid, containerName, req interface{}) *gomock.Call {
func (mr *MockProviderMockRecorder) GetContainerInfo(ctx, podFullName, uid, containerName, req interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), podFullName, uid, containerName, req)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), ctx, podFullName, uid, containerName, req)
}

// GetNode mocks base method.
@ -220,18 +221,18 @@ func (mr *MockProviderMockRecorder) GetRequestedContainersInfo(containerName, op
}

// ImageFsStats mocks base method.
func (m *MockProvider) ImageFsStats() (*v1alpha1.FsStats, error) {
func (m *MockProvider) ImageFsStats(ctx context.Context) (*v1alpha1.FsStats, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageFsStats")
	ret := m.ctrl.Call(m, "ImageFsStats", ctx)
	ret0, _ := ret[0].(*v1alpha1.FsStats)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageFsStats indicates an expected call of ImageFsStats.
func (mr *MockProviderMockRecorder) ImageFsStats() *gomock.Call {
func (mr *MockProviderMockRecorder) ImageFsStats(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats))
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats), ctx)
}

// ListBlockVolumesForPod mocks base method.
@ -250,48 +251,48 @@ func (mr *MockProviderMockRecorder) ListBlockVolumesForPod(podUID interface{}) *
}

// ListPodCPUAndMemoryStats mocks base method.
func (m *MockProvider) ListPodCPUAndMemoryStats() ([]v1alpha1.PodStats, error) {
func (m *MockProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]v1alpha1.PodStats, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats")
	ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats", ctx)
	ret0, _ := ret[0].([]v1alpha1.PodStats)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListPodCPUAndMemoryStats indicates an expected call of ListPodCPUAndMemoryStats.
func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats() *gomock.Call {
func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats))
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats), ctx)
}

// ListPodStats mocks base method.
func (m *MockProvider) ListPodStats() ([]v1alpha1.PodStats, error) {
func (m *MockProvider) ListPodStats(ctx context.Context) ([]v1alpha1.PodStats, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListPodStats")
	ret := m.ctrl.Call(m, "ListPodStats", ctx)
	ret0, _ := ret[0].([]v1alpha1.PodStats)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListPodStats indicates an expected call of ListPodStats.
func (mr *MockProviderMockRecorder) ListPodStats() *gomock.Call {
func (mr *MockProviderMockRecorder) ListPodStats(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats))
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats), ctx)
}

// ListPodStatsAndUpdateCPUNanoCoreUsage mocks base method.
func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]v1alpha1.PodStats, error) {
func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]v1alpha1.PodStats, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage")
	ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage", ctx)
	ret0, _ := ret[0].([]v1alpha1.PodStats)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListPodStatsAndUpdateCPUNanoCoreUsage indicates an expected call of ListPodStatsAndUpdateCPUNanoCoreUsage.
func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage() *gomock.Call {
func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage))
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage), ctx)
}

// ListVolumesForPod mocks base method.

@ -21,6 +21,7 @@ limitations under the License.
package testing

import (
	context "context"
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
@ -51,31 +52,31 @@ func (m *MockSummaryProvider) EXPECT() *MockSummaryProviderMockRecorder {
}

// Get mocks base method.
func (m *MockSummaryProvider) Get(updateStats bool) (*v1alpha1.Summary, error) {
func (m *MockSummaryProvider) Get(ctx context.Context, updateStats bool) (*v1alpha1.Summary, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", updateStats)
	ret := m.ctrl.Call(m, "Get", ctx, updateStats)
	ret0, _ := ret[0].(*v1alpha1.Summary)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockSummaryProviderMockRecorder) Get(updateStats interface{}) *gomock.Call {
func (mr *MockSummaryProviderMockRecorder) Get(ctx, updateStats interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), updateStats)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), ctx, updateStats)
}

// GetCPUAndMemoryStats mocks base method.
func (m *MockSummaryProvider) GetCPUAndMemoryStats() (*v1alpha1.Summary, error) {
func (m *MockSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*v1alpha1.Summary, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetCPUAndMemoryStats")
	ret := m.ctrl.Call(m, "GetCPUAndMemoryStats", ctx)
	ret0, _ := ret[0].(*v1alpha1.Summary)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetCPUAndMemoryStats indicates an expected call of GetCPUAndMemoryStats.
func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats() *gomock.Call {
func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats))
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats), ctx)
}
Some files were not shown because too many files have changed in this diff.