Mirror of https://github.com/k3s-io/kubernetes.git (synced 2026-02-22 07:03:28 +00:00)
Merge pull request #133930 from bart0sh/PR198-migrate-stats-to-contextual-logging
chore(kubelet): migrate stats to contextual logging
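The change applies the kubelet's contextual-logging pattern throughout the stats providers: functions that already receive a context.Context derive their logger with klog.FromContext(ctx), helpers without a context gain an explicit klog.Logger parameter, and calls to the global klog.InfoS/ErrorS/V(n).InfoS helpers become logger.Info/Error/V(n).Info calls on that logger. A minimal, self-contained sketch of the pattern (function names here are illustrative, not taken from this PR):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// collectStats is a hypothetical helper: instead of calling the global
// klog.InfoS/ErrorS functions, it logs through the logger handed to it.
func collectStats(logger klog.Logger, target string) error {
	logger.V(4).Info("Collecting stats", "target", target)
	if target == "" {
		err := fmt.Errorf("empty target")
		logger.Error(err, "Unable to collect stats")
		return err
	}
	return nil
}

// listStats is a hypothetical caller: it derives the logger from the request
// context once and threads it through the rest of the call chain.
func listStats(ctx context.Context) error {
	logger := klog.FromContext(ctx)
	return collectStats(logger, "pods")
}

func main() {
	_ = listStats(context.Background())
}
```

The call-site rewrite in the hunks below is mechanical: klog.InfoS becomes logger.Info, klog.ErrorS becomes logger.Error, and klog.V(n).InfoS becomes logger.V(n).Info.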
@@ -94,12 +94,12 @@ func (p *cadvisorStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.Po
 if err != nil {
 return nil, fmt.Errorf("failed to get imageFs info: %v", err)
 }
-infos, err := getCadvisorContainerInfo(p.cadvisor)
+logger := klog.FromContext(ctx)
+infos, err := getCadvisorContainerInfo(logger, p.cadvisor)
 if err != nil {
 return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
 }

-filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)
+filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
 // Map each container to a pod and update the PodStats with container data.
 podToStats := map[statsapi.PodReference]*statsapi.PodStats{}
 for key, cinfo := range filteredInfos {
@@ -111,7 +111,7 @@ func (p *cadvisorStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.Po
 continue
 }
 // Build the Pod key if this container is managed by a Pod
-if !isPodManagedContainer(&cinfo) {
+if !isPodManagedContainer(logger, &cinfo) {
 continue
 }
 ref := buildPodRef(cinfo.Spec.Labels)
@@ -132,13 +132,13 @@ func (p *cadvisorStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.Po
 // the user and has network stats.
 podStats.Network = cadvisorInfoToNetworkStats(&cinfo)
 } else {
-containerStat := cadvisorInfoToContainerStats(containerName, &cinfo, &rootFsInfo, &imageFsInfo)
+containerStat := cadvisorInfoToContainerStats(logger, containerName, &cinfo, &rootFsInfo, &imageFsInfo)
 // NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers
 // using old log path, they will be populated by cadvisorInfoToContainerStats.
 podUID := types.UID(podStats.PodRef.UID)
 logs, err := p.hostStatsProvider.getPodContainerLogStats(podStats.PodRef.Namespace, podStats.PodRef.Name, podUID, containerName, &rootFsInfo)
 if err != nil {
-klog.ErrorS(err, "Unable to fetch container log stats", "containerName", containerName)
+logger.Error(err, "Unable to fetch container log stats", "containerName", containerName)
 } else {
 containerStat.Logs = logs
 }
@@ -151,7 +151,7 @@ func (p *cadvisorStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.Po
 // Add each PodStats to the result.
 result := make([]statsapi.PodStats, 0, len(podToStats))
 for _, podStats := range podToStats {
-makePodStorageStats(podStats, &rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, false)
+makePodStorageStats(logger, podStats, &rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, false)

 podUID := types.UID(podStats.PodRef.UID)
 // Lookup the pod-level cgroup's CPU and memory stats
@@ -224,12 +224,13 @@ func (p *cadvisorStatsProvider) PodCPUAndMemoryStats(ctx context.Context, pod *v
 }

 // ListPodCPUAndMemoryStats returns the cpu and memory stats of all the pod-managed containers.
-func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) {
-infos, err := getCadvisorContainerInfo(p.cadvisor)
+func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
+logger := klog.FromContext(ctx)
+infos, err := getCadvisorContainerInfo(logger, p.cadvisor)
 if err != nil {
 return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
 }
-filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)
+filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
 // Map each container to a pod and update the PodStats with container data.
 podToStats := map[statsapi.PodReference]*statsapi.PodStats{}
 for key, cinfo := range filteredInfos {
@@ -241,7 +242,7 @@ func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(_ context.Context) ([]s
 continue
 }
 // Build the Pod key if this container is managed by a Pod
-if !isPodManagedContainer(&cinfo) {
+if !isPodManagedContainer(logger, &cinfo) {
 continue
 }
 ref := buildPodRef(cinfo.Spec.Labels)
@@ -362,7 +363,8 @@ func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *s
 return fsStats, fsStats, nil
 }

-klog.InfoS("Detect Split Filesystem", "ImageFilesystems", imageStats.ImageFilesystems[0], "ContainerFilesystems", imageStats.ContainerFilesystems[0])
+logger := klog.FromContext(ctx)
+logger.Info("Detect Split Filesystem", "ImageFilesystems", imageStats.ImageFilesystems[0], "ContainerFilesystems", imageStats.ContainerFilesystems[0])

 var containerFsInodesUsed *uint64
 if containerFsInfo.Inodes != nil && containerFsInfo.InodesFree != nil {
@@ -406,12 +408,12 @@ func buildPodRef(containerLabels map[string]string) statsapi.PodReference {
 }

 // isPodManagedContainer returns true if the cinfo container is managed by a Pod
-func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool {
+func isPodManagedContainer(logger klog.Logger, cinfo *cadvisorapiv2.ContainerInfo) bool {
 podName := kubetypes.GetPodName(cinfo.Spec.Labels)
 podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels)
 managed := podName != "" && podNamespace != ""
 if !managed && podName != podNamespace {
-klog.InfoS(
+logger.Info(
 "Expect container to have either both podName and podNamespace labels, or neither",
 "podNameLabel", podName, "podNamespaceLabel", podNamespace)
 }
@@ -432,7 +434,7 @@ func getCadvisorPodInfoFromPodUID(podUID types.UID, infos map[string]cadvisorapi
 // the second return map is pod cgroup key <-> ContainerInfo.
 // A ContainerInfo is considered to be of a terminated container if it has an
 // older CreationTime and zero CPU instantaneous and memory RSS usage.
-func filterTerminatedContainerInfoAndAssembleByPodCgroupKey(containerInfo map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
+func filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger klog.Logger, containerInfo map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
 cinfoMap := make(map[containerID][]containerInfoWithCgroup)
 cinfosByPodCgroupKey := make(map[string]cadvisorapiv2.ContainerInfo)
 for key, cinfo := range containerInfo {
@@ -446,7 +448,7 @@ func filterTerminatedContainerInfoAndAssembleByPodCgroupKey(containerInfo map[st
 podCgroupKey = filepath.Base(key)
 }
 cinfosByPodCgroupKey[podCgroupKey] = cinfo
-if !isPodManagedContainer(&cinfo) {
+if !isPodManagedContainer(logger, &cinfo) {
 continue
 }
 cinfoID := containerID{
@@ -554,7 +556,7 @@ func isContainerTerminated(info *cadvisorapiv2.ContainerInfo) bool {
 return cstat.CpuInst.Usage.Total == 0 && cstat.Memory.RSS == 0
 }

-func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) {
+func getCadvisorContainerInfo(logger klog.Logger, ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) {
 infos, err := ca.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{
 IdType: cadvisorapiv2.TypeName,
 Count: 2, // 2 samples are needed to compute "instantaneous" CPU
@@ -564,7 +566,7 @@ func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.C
 if _, ok := infos["/"]; ok {
 // If the failure is partial, log it and return a best-effort
 // response.
-klog.ErrorS(err, "Partial failure issuing cadvisor.ContainerInfoV2")
+logger.Error(err, "Partial failure issuing cadvisor.ContainerInfoV2")
 } else {
 return nil, fmt.Errorf("failed to get root cgroup stats: %w", err)
 }
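The cAdvisor helpers touched above (getCadvisorContainerInfo, isPodManagedContainer, filterTerminatedContainerInfoAndAssembleByPodCgroupKey, makePodStorageStats) have no context of their own, so each takes the logger as an explicit first parameter. A hedged sketch of that shape (names below are illustrative, not copied from the source):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// describeContainer stands in for a context-free helper such as
// isPodManagedContainer: it accepts the logger as its first argument rather
// than reaching for the global klog functions. The label key mirrors the
// kubelet's pod-name container label and is used here purely for illustration.
func describeContainer(logger klog.Logger, labels map[string]string) string {
	name, ok := labels["io.kubernetes.pod.name"]
	if !ok {
		logger.Info("Container has no pod name label", "labels", labels)
		return ""
	}
	return fmt.Sprintf("pod/%s", name)
}

func main() {
	logger := klog.FromContext(context.Background())
	fmt.Println(describeContainer(logger, map[string]string{"io.kubernetes.pod.name": "pod0"}))
}
```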
@@ -43,6 +43,7 @@ import (
 serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
 statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 "k8s.io/kubernetes/pkg/volume"
+"k8s.io/kubernetes/test/utils/ktesting"
 )

 func TestFilterTerminatedContainerInfoAndAssembleByPodCgroupKey(t *testing.T) {
@@ -91,7 +92,8 @@ func TestFilterTerminatedContainerInfoAndAssembleByPodCgroupKey(t *testing.T) {
 //ContainerInfo with no CPU/memory usage but has network usage for uncleaned cgroups, should not be filtered out
 "/pod2-c222-zerocpumem-1": getContainerInfoWithZeroCpuMem(seedPastPod0Container0, pName2, namespace, cName222),
 }
-filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)
+logger, _ := ktesting.NewTestContext(t)
+filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
 assert.Len(t, filteredInfos, 5)
 assert.Len(t, allInfos, 11)
 for _, c := range []string{"/pod0-i", "/pod0-c0"} {
@@ -370,7 +372,7 @@ func TestCadvisorListPodStats(t *testing.T) {
 }

 func TestCadvisorPodCPUAndMemoryStats(t *testing.T) {
-ctx := context.Background()
+tCtx := ktesting.Init(t)
 const (
 namespace = "test0"
 podName = "pod0"
@@ -420,7 +422,7 @@ func TestCadvisorPodCPUAndMemoryStats(t *testing.T) {

 p := NewCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, nil, nil, nil, NewFakeHostStatsProvider(&containertest.FakeOS{}), mockCM)

-ps, err := p.PodCPUAndMemoryStats(ctx, pod, nil)
+ps, err := p.PodCPUAndMemoryStats(tCtx, pod, nil)
 require.NoError(t, err)
 assert.Equal(t, podName, ps.PodRef.Name)
 assert.Equal(t, namespace, ps.PodRef.Namespace)
@@ -463,8 +465,8 @@ func TestCadvisorPodCPUAndMemoryStats(t *testing.T) {
 }

 func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) {
+ctx := ktesting.Init(t)
 featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletPSI, true)
-ctx := context.Background()
 const (
 namespace0 = "test0"
 namespace2 = "test2"
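On the test side, the updated tests no longer create a bare context.Background(); they use the ktesting helpers imported above: ktesting.Init(t) yields a per-test context that can be passed wherever a context.Context is expected, and ktesting.NewTestContext(t) also returns the logger for helpers that take a klog.Logger directly. A small illustrative test, assuming only the behavior visible in the hunks above:

```go
package stats_test

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// TestContextualLoggingSketch is illustrative; it only exercises the two
// helpers the migrated tests rely on.
func TestContextualLoggingSketch(t *testing.T) {
	tCtx := ktesting.Init(t)                // replaces ctx := context.Background()
	logger, _ := ktesting.NewTestContext(t) // logger for helpers that take klog.Logger

	logger.Info("running with a per-test logger")
	select {
	case <-tCtx.Done():
		t.Fatal("test context should not be done yet")
	default:
	}
}
```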
@@ -152,6 +152,7 @@ func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUs
 return nil, fmt.Errorf("failed to get pod or container map: %v", err)
 }

+logger := klog.FromContext(ctx)
 if p.podAndContainerStatsFromCRI {
 result, err := p.listPodStatsStrictlyFromCRI(ctx, updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo)
 if err == nil {
@@ -164,7 +165,7 @@ func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUs
 return nil, err
 }
 // CRI implementation doesn't support ListPodSandboxStats, warn and fallback.
-klog.V(5).ErrorS(err,
+logger.V(5).Error(err,
 "CRI implementation must be updated to support ListPodSandboxStats if PodAndContainerStatsFromCRI feature gate is enabled. Falling back to populating with cAdvisor; this call will fail in the future.",
 )
 }
@@ -184,19 +185,20 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd
 if err != nil {
 return nil, fmt.Errorf("failed to list all container stats: %v", err)
 }
-allInfos, err := getCadvisorContainerInfo(p.cadvisor)

+logger := klog.FromContext(ctx)
+allInfos, err := getCadvisorContainerInfo(logger, p.cadvisor)
 if err != nil {
 return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err)
 }
-caInfos, allInfos := getCRICadvisorStats(allInfos)
+caInfos, allInfos := getCRICadvisorStats(logger, allInfos)

 // get network stats for containers.
 // This is only used on Windows. For other platforms, (nil, nil) should be returned.
-containerNetworkStats, err := p.listContainerNetworkStats()
+containerNetworkStats, err := p.listContainerNetworkStats(logger)
 if err != nil {
 return nil, fmt.Errorf("failed to list container network stats: %v", err)
 }

 for _, stats := range resp {
 containerID := stats.Attributes.Id
 container, found := containerMap[containerID]
@@ -219,11 +221,11 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd
 }

 // Fill available stats for full set of required pod stats
-cs, err := p.makeContainerStats(stats, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage)
+cs, err := p.makeContainerStats(logger, stats, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage)
 if err != nil {
 return nil, fmt.Errorf("make container stats: %w", err)
 }
-p.addPodNetworkStats(ps, podSandboxID, caInfos, cs, containerNetworkStats[podSandboxID])
+p.addPodNetworkStats(logger, ps, podSandboxID, caInfos, cs, containerNetworkStats[podSandboxID])
 p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
 p.addSwapStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
 p.addIOStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
@@ -232,9 +234,9 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd
 // container stats
 caStats, caFound := caInfos[containerID]
 if !caFound {
-klog.V(5).InfoS("Unable to find cadvisor stats for container", "containerID", containerID)
+logger.V(5).Info("Unable to find cadvisor stats for container", "containerID", containerID)
 } else {
-p.addCadvisorContainerStats(cs, &caStats)
+p.addCadvisorContainerStats(logger, cs, &caStats)
 p.addProcessStats(ps, &caStats)
 }

@@ -245,7 +247,7 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd

 result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats))
 for _, s := range sandboxIDToPodStats {
-makePodStorageStats(s, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
+makePodStorageStats(logger, s, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
 result = append(result, *s)
 }
 return result, nil
@@ -256,12 +258,13 @@ func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, upda
 if err != nil {
 return nil, err
 }
+logger := klog.FromContext(ctx)

 fsIDtoInfo := make(map[string]*cadvisorapiv2.FsInfo)
 summarySandboxStats := make([]statsapi.PodStats, 0, len(podSandboxMap))
 for _, criSandboxStat := range criSandboxStats {
 if criSandboxStat == nil || criSandboxStat.Attributes == nil {
-klog.V(5).InfoS("Unable to find CRI stats for sandbox")
+logger.V(5).Info("Unable to find CRI stats for sandbox")
 continue
 }
 podSandbox, found := podSandboxMap[criSandboxStat.Attributes.Id]
@@ -269,7 +272,7 @@ func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, upda
 continue
 }
 ps := buildPodStats(podSandbox)
-if err := p.addCRIPodContainerStats(criSandboxStat, ps, fsIDtoInfo, containerMap, podSandbox, rootFsInfo, updateCPUNanoCoreUsage); err != nil {
+if err := p.addCRIPodContainerStats(logger, criSandboxStat, ps, fsIDtoInfo, containerMap, podSandbox, rootFsInfo, updateCPUNanoCoreUsage); err != nil {
 return nil, fmt.Errorf("add CRI pod container stats: %w", err)
 }
 addCRIPodNetworkStats(ps, criSandboxStat)
@@ -277,7 +280,7 @@ func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, upda
 addCRIPodMemoryStats(ps, criSandboxStat)
 addCRIPodProcessStats(ps, criSandboxStat)
 addCRIPodIOStats(ps, criSandboxStat)
-makePodStorageStats(ps, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
+makePodStorageStats(logger, ps, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
 summarySandboxStats = append(summarySandboxStats, *ps)
 }
 return summarySandboxStats, nil
@@ -382,6 +385,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat
 if err != nil {
 return nil, fmt.Errorf("failed to get pod or container map: %v", err)
 }
+logger := klog.FromContext(ctx)

 result := make([]statsapi.PodStats, 0, len(podSandboxMap))
 if p.podAndContainerStatsFromCRI {
@@ -407,7 +411,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat
 return nil, err
 }
 // CRI implementation doesn't support ListPodSandboxStats, warn and fallback.
-klog.ErrorS(err,
+logger.Error(err,
 "CRI implementation must be updated to support ListPodSandboxStats if PodAndContainerStatsFromCRI feature gate is enabled. Falling back to populating with cAdvisor; this call will fail in the future.",
 )
 }
@@ -417,11 +421,11 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat
 return nil, fmt.Errorf("failed to list all container stats: %v", err)
 }

-allInfos, err := getCadvisorContainerInfo(p.cadvisor)
+allInfos, err := getCadvisorContainerInfo(logger, p.cadvisor)
 if err != nil {
 return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err)
 }
-caInfos, allInfos := getCRICadvisorStats(allInfos)
+caInfos, allInfos := getCRICadvisorStats(logger, allInfos)

 for _, stats := range resp {
 containerID := stats.Attributes.Id
@@ -453,9 +457,9 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat
 // container stats
 caStats, caFound := caInfos[containerID]
 if !caFound {
-klog.V(4).InfoS("Unable to find cadvisor stats for container", "containerID", containerID)
+logger.V(4).Info("Unable to find cadvisor stats for container", "containerID", containerID)
 } else {
-p.addCadvisorContainerCPUAndMemoryStats(cs, &caStats)
+p.addCadvisorContainerCPUAndMemoryStats(logger, cs, &caStats)
 }
 ps.Containers = append(ps.Containers, *cs)
 }
@@ -516,7 +520,7 @@ func (p *criStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *statsa
 if fs.InodesUsed != nil {
 imageFsRet.InodesUsed = &fs.InodesUsed.Value
 }
-imageFsInfo, err := p.getFsInfo(fs.GetFsId())
+imageFsInfo, err := p.getFsInfo(klog.FromContext(ctx), fs.GetFsId())
 if err != nil {
 return nil, nil, fmt.Errorf("get filesystem info: %w", err)
 }
@@ -542,7 +546,7 @@ func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) {
 return "", err
 }
 for _, fs := range resp.GetImageFilesystems() {
-fsInfo, err := p.getFsInfo(fs.GetFsId())
+fsInfo, err := p.getFsInfo(klog.FromContext(ctx), fs.GetFsId())
 if err != nil {
 return "", fmt.Errorf("get filesystem info: %w", err)
 }
@@ -556,9 +560,9 @@ func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) {
 // getFsInfo returns the information of the filesystem with the specified
 // fsID. If any error occurs, this function logs the error and returns
 // nil.
-func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) (*cadvisorapiv2.FsInfo, error) {
+func (p *criStatsProvider) getFsInfo(logger klog.Logger, fsID *runtimeapi.FilesystemIdentifier) (*cadvisorapiv2.FsInfo, error) {
 if fsID == nil {
-klog.V(2).InfoS("Failed to get filesystem info: fsID is nil")
+logger.V(2).Info("Failed to get filesystem info: fsID is nil")
 return nil, nil
 }
 mountpoint := fsID.GetMountpoint()
@@ -568,9 +572,9 @@ func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) (*ca
 if errors.Is(err, cadvisorfs.ErrNoSuchDevice) ||
 errors.Is(err, cadvisorfs.ErrDeviceNotInPartitionsMap) ||
 errors.Is(err, cadvisormemory.ErrDataNotFound) {
-klog.V(2).InfoS(msg, "mountpoint", mountpoint, "err", err)
+logger.V(2).Info(msg, "mountpoint", mountpoint, "err", err)
 } else {
-klog.ErrorS(err, msg, "mountpoint", mountpoint)
+logger.Error(err, msg, "mountpoint", mountpoint)
 return nil, fmt.Errorf("%s: %w", msg, err)
 }
 return nil, nil
@@ -592,6 +596,7 @@ func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
 }

 func (p *criStatsProvider) addPodNetworkStats(
+logger klog.Logger,
 ps *statsapi.PodStats,
 podSandboxID string,
 caInfos map[string]cadvisorapiv2.ContainerInfo,
@@ -615,7 +620,7 @@ func (p *criStatsProvider) addPodNetworkStats(
 }

 // TODO: sum Pod network stats from container stats.
-klog.V(4).InfoS("Unable to find network stats for sandbox", "sandboxID", podSandboxID)
+logger.V(4).Info("Unable to find network stats for sandbox", "sandboxID", podSandboxID)
 }

 func (p *criStatsProvider) addPodCPUMemoryStats(
@@ -728,6 +733,7 @@ func (p *criStatsProvider) addProcessStats(
 }

 func (p *criStatsProvider) makeContainerStats(
+logger klog.Logger,
 stats *runtimeapi.ContainerStats,
 container *runtimeapi.Container,
 rootFsInfo *cadvisorapiv2.FsInfo,
@@ -752,7 +758,7 @@ func (p *criStatsProvider) makeContainerStats(
 }
 var usageNanoCores *uint64
 if updateCPUNanoCoreUsage {
-usageNanoCores = p.getAndUpdateContainerUsageNanoCores(stats)
+usageNanoCores = p.getAndUpdateContainerUsageNanoCores(logger, stats)
 } else {
 usageNanoCores = p.getContainerUsageNanoCores(stats)
 }
@@ -819,7 +825,7 @@ func (p *criStatsProvider) makeContainerStats(
 if fsID != nil {
 imageFsInfo, found := fsIDtoInfo[fsID.Mountpoint]
 if !found {
-imageFsInfo, err = p.getFsInfo(fsID)
+imageFsInfo, err = p.getFsInfo(logger, fsID)
 if err != nil {
 return nil, fmt.Errorf("get filesystem info: %w", err)
 }
@@ -841,7 +847,7 @@ func (p *criStatsProvider) makeContainerStats(
 // officially support in-place upgrade anyway.
 result.Logs, err = p.hostStatsProvider.getPodContainerLogStats(meta.GetNamespace(), meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName(), rootFsInfo)
 if err != nil {
-klog.ErrorS(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName())
+logger.Error(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName())
 }
 return result, nil
 }
@@ -973,7 +979,7 @@ func (p *criStatsProvider) getContainerUsageNanoCores(stats *runtimeapi.Containe
 // getAndUpdateContainerUsageNanoCores first attempts to get the usage nano cores from the stats reported
 // by the CRI. If it is unable to, it computes usageNanoCores based on the given and the cached usageCoreNanoSeconds,
 // updates the cache with the computed usageNanoCores, and returns the usageNanoCores.
-func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi.ContainerStats) *uint64 {
+func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(logger klog.Logger, stats *runtimeapi.ContainerStats) *uint64 {
 if stats == nil || stats.Attributes == nil || stats.Cpu == nil {
 return nil
 }
@@ -1015,7 +1021,7 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi

 if err != nil {
 // This should not happen. Log now to raise visibility
-klog.ErrorS(err, "Failed updating cpu usage nano core")
+logger.Error(err, "Failed updating cpu usage nano core")
 }
 return usage
 }
@@ -1105,11 +1111,12 @@ func removeTerminatedContainers(containers []*runtimeapi.Container) []*runtimeap
 }

 func (p *criStatsProvider) addCadvisorContainerStats(
+logger klog.Logger,
 cs *statsapi.ContainerStats,
 caPodStats *cadvisorapiv2.ContainerInfo,
 ) {
 if caPodStats.Spec.HasCustomMetrics {
-cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(caPodStats)
+cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, caPodStats)
 }

 cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats)
@@ -1134,11 +1141,12 @@ func (p *criStatsProvider) addCadvisorContainerStats(
 }

 func (p *criStatsProvider) addCadvisorContainerCPUAndMemoryStats(
+logger klog.Logger,
 cs *statsapi.ContainerStats,
 caPodStats *cadvisorapiv2.ContainerInfo,
 ) {
 if caPodStats.Spec.HasCustomMetrics {
-cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(caPodStats)
+cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, caPodStats)
 }

 cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats)
@@ -1150,9 +1158,9 @@ func (p *criStatsProvider) addCadvisorContainerCPUAndMemoryStats(
 }
 }

-func getCRICadvisorStats(infos map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
+func getCRICadvisorStats(logger klog.Logger, infos map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
 stats := make(map[string]cadvisorapiv2.ContainerInfo)
-filteredInfos, cinfosByPodCgroupKey := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(infos)
+filteredInfos, cinfosByPodCgroupKey := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
 for key, info := range filteredInfos {
 // On systemd using devicemapper each mount into the container has an
 // associated cgroup. We ignore them to ensure we do not get duplicate
@@ -1162,7 +1170,7 @@ func getCRICadvisorStats(infos map[string]cadvisorapiv2.ContainerInfo) (map[stri
 continue
 }
 // Build the Pod key if this container is managed by a Pod
-if !isPodManagedContainer(&info) {
+if !isPodManagedContainer(logger, &info) {
 continue
 }
 stats[extractIDFromCgroupPath(key)] = info
@@ -28,12 +28,16 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+"k8s.io/klog/v2"
 statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
 "k8s.io/kubernetes/pkg/features"
 )

-func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.PodSandboxStats,
-ps *statsapi.PodStats, fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
+func (p *criStatsProvider) addCRIPodContainerStats(
+logger klog.Logger,
+criSandboxStat *runtimeapi.PodSandboxStats,
+ps *statsapi.PodStats,
+fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
 containerMap map[string]*runtimeapi.Container,
 podSandbox *runtimeapi.PodSandbox,
 rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) error {
@@ -43,7 +47,7 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po
 continue
 }
 // Fill available stats for full set of required pod stats
-cs, err := p.makeContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(),
+cs, err := p.makeContainerStats(logger, criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(),
 updateCPUNanoCoreUsage)
 if err != nil {
 return fmt.Errorf("make container stats: %w", err)
@@ -124,6 +128,6 @@ func addCRIPodProcessStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSand

 // listContainerNetworkStats returns the network stats of all the running containers.
 // It should return (nil, nil) for platforms other than Windows.
-func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.NetworkStats, error) {
+func (p *criStatsProvider) listContainerNetworkStats(klog.Logger) (map[string]*statsapi.NetworkStats, error) {
 return nil, nil
 }
@@ -23,20 +23,25 @@ import (
 cadvisorapiv2 "github.com/google/cadvisor/info/v2"

 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+"k8s.io/klog/v2"
 statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
 )

 // listContainerNetworkStats returns the network stats of all the running containers.
 // It should return (nil, nil) for platforms other than Windows.
-func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.NetworkStats, error) {
+func (p *criStatsProvider) listContainerNetworkStats(klog.Logger) (map[string]*statsapi.NetworkStats, error) {
 return nil, nil
 }

-func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.PodSandboxStats,
-ps *statsapi.PodStats, fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
-containerMap map[string]*runtimeapi.Container,
-podSandbox *runtimeapi.PodSandbox,
-rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) error {
+func (p *criStatsProvider) addCRIPodContainerStats(
+klog.Logger,
+*runtimeapi.PodSandboxStats,
+*statsapi.PodStats,
+map[string]*cadvisorapiv2.FsInfo,
+map[string]*runtimeapi.Container,
+*runtimeapi.PodSandbox,
+*cadvisorapiv2.FsInfo,
+bool) error {
 return nil
 }
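The non-Windows stubs above keep their signatures in sync with the real implementations while dropping parameter names they never use; Go permits fully unnamed parameters for exactly this case. A hedged illustration (names and types chosen to mirror the stubs, not copied from the source):

```go
package main

import "k8s.io/klog/v2"

// realWork stands in for a platform-specific implementation that actually
// uses its arguments.
func realWork(logger klog.Logger, id string, strict bool) error {
	logger.Info("doing work", "id", id, "strict", strict)
	return nil
}

// stubWork compiles against the same call sites but ignores its arguments,
// so the parameters are left unnamed, as in the no-op addCRIPodContainerStats
// and listContainerNetworkStats stubs above.
func stubWork(klog.Logger, string, bool) error {
	return nil
}

func main() {
	logger := klog.Background()
	_ = realWork(logger, "sandbox0", true)
	_ = stubWork(logger, "sandbox0", true)
}
```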
@@ -17,7 +17,6 @@ limitations under the License.
 package stats

 import (
-"context"
 "math/rand"
 "os"
 "path/filepath"
@@ -51,6 +50,7 @@ import (
 kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
 "k8s.io/kubernetes/pkg/volume"
+"k8s.io/kubernetes/test/utils/ktesting"
 "k8s.io/utils/ptr"
 )

@@ -100,7 +100,7 @@ const testPodLogDirectory = "/var/log/kube/pods/" // Use non-default path to ens

 func TestCRIListPodStats(t *testing.T) {
 featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletPSI, true)
-ctx := context.Background()
+tCtx := ktesting.Init(t)
 var (
 imageFsMountpoint = "/test/mount/point"
 unknownMountpoint = "/unknown/mount/point"
@@ -248,7 +248,7 @@ func TestCRIListPodStats(t *testing.T) {
 fakeContainerStatsProvider{},
 )

-stats, err := provider.ListPodStats(ctx)
+stats, err := provider.ListPodStats(tCtx)
 assert := assert.New(t)
 assert.NoError(err)
 assert.Len(stats, 4)
@@ -351,12 +351,12 @@ func TestCRIListPodStats(t *testing.T) {
 }

 func TestListPodStatsStrictlyFromCRI(t *testing.T) {
+logger, tCtx := ktesting.NewTestContext(t)
 featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletPSI, true)
 if runtime.GOOS == "windows" {
 // TODO: remove skip once the failing test has been fixed.
 t.Skip("Skip failing test on Windows.")
 }
-ctx := context.Background()
 var (
 imageFsMountpoint = "/test/mount/point"
 unknownMountpoint = "/unknown/mount/point"
@@ -483,11 +483,11 @@ func TestListPodStatsStrictlyFromCRI(t *testing.T) {
 fakeContainerStatsProvider{},
 )

-cadvisorInfos, err := getCadvisorContainerInfo(mockCadvisor)
+cadvisorInfos, err := getCadvisorContainerInfo(logger, mockCadvisor)
 if err != nil {
 t.Errorf("failed to get container info from cadvisor: %v", err)
 }
-stats, err := provider.ListPodStats(ctx)
+stats, err := provider.ListPodStats(tCtx)
 assert := assert.New(t)
 assert.NoError(err)
 assert.Len(stats, 2)
@@ -552,7 +552,7 @@ func TestListPodStatsStrictlyFromCRI(t *testing.T) {
 }
 func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
 featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletPSI, true)
-ctx := context.Background()
+tCtx := ktesting.Init(t)

 var (
 imageFsMountpoint = "/test/mount/point"
@@ -661,7 +661,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
 fakeContainerStatsProvider{},
 )

-stats, err := provider.ListPodCPUAndMemoryStats(ctx)
+stats, err := provider.ListPodCPUAndMemoryStats(tCtx)
 assert := assert.New(t)
 assert.NoError(err)
 assert.Len(stats, 5)
@@ -765,7 +765,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
 }

 func TestCRIPodCPUAndMemoryStats(t *testing.T) {
-ctx := context.Background()
+tCtx := ktesting.Init(t)

 const (
 podName = "test-pod"
@@ -830,7 +830,7 @@ func TestCRIPodCPUAndMemoryStats(t *testing.T) {
 },
 }

-stats, err := provider.PodCPUAndMemoryStats(ctx, pod, podStatus)
+stats, err := provider.PodCPUAndMemoryStats(tCtx, pod, podStatus)
 assert := assert.New(t)
 require.NoError(t, err)

@@ -866,7 +866,7 @@ func TestCRIPodCPUAndMemoryStats(t *testing.T) {
 }

 func TestCRIImagesFsStats(t *testing.T) {
-ctx := context.Background()
+tCtx := ktesting.Init(t)
 var (
 imageFsMountpoint = "/test/mount/point"
 imageFsInfo = getTestFsInfo(2000)
@@ -896,7 +896,7 @@ func TestCRIImagesFsStats(t *testing.T) {
 fakeContainerStatsProvider{},
 )

-stats, containerStats, err := provider.ImageFsStats(ctx)
+stats, containerStats, err := provider.ImageFsStats(tCtx)
 assert := assert.New(t)
 assert.NoError(err)

@@ -1513,6 +1513,7 @@ func TestGetContainerUsageNanoCores(t *testing.T) {
 expected: nil,
 },
 }
+logger, _ := ktesting.NewTestContext(t)
 for _, test := range tests {
 provider := &criStatsProvider{cpuUsageCache: test.cpuUsageCache}
 // Before the update, the cached value should be nil
@@ -1520,7 +1521,7 @@ func TestGetContainerUsageNanoCores(t *testing.T) {
 assert.Nil(t, cached)

 // Update the cache and get the latest value.
-real := provider.getAndUpdateContainerUsageNanoCores(test.stats)
+real := provider.getAndUpdateContainerUsageNanoCores(logger, test.stats)
 assert.Equal(t, test.expected, real, test.desc)

 // After the update, the cached value should be up-to-date
@@ -52,12 +52,11 @@ func (s networkStats) GetHNSEndpointStats(endpointName string) (*hnslib.HNSEndpo
 }

 // listContainerNetworkStats returns the network stats of all the running containers.
-func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.NetworkStats, error) {
+func (p *criStatsProvider) listContainerNetworkStats(logger klog.Logger) (map[string]*statsapi.NetworkStats, error) {
 networkStatsProvider := newNetworkStatsProvider(p)

 endpoints, err := networkStatsProvider.HNSListEndpointRequest()
 if err != nil {
-klog.ErrorS(err, "Failed to fetch current HNS endpoints")
+logger.Error(err, "Failed to fetch current HNS endpoints")
 return nil, err
 }

@@ -65,7 +64,7 @@ func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.Net
 for _, endpoint := range endpoints {
 endpointStats, err := networkStatsProvider.GetHNSEndpointStats(endpoint.Id)
 if err != nil {
-klog.V(2).InfoS("Failed to fetch statistics for endpoint, continue to get stats for other endpoints", "endpointId", endpoint.Id, "containers", endpoint.SharedContainers)
+logger.V(2).Info("Failed to fetch statistics for endpoint, continue to get stats for other endpoints", "endpointId", endpoint.Id, "containers", endpoint.SharedContainers)
 continue
 }

@@ -84,7 +83,7 @@ func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.Net
 return networkStats, nil
 }

-func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.PodSandboxStats,
+func (p *criStatsProvider) addCRIPodContainerStats(logger klog.Logger, criSandboxStat *runtimeapi.PodSandboxStats,
 ps *statsapi.PodStats, fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
 containerMap map[string]*runtimeapi.Container,
 podSandbox *runtimeapi.PodSandbox,
@@ -96,7 +95,7 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po
 continue
 }
 // Fill available stats for full set of required pod stats
-cs, err := p.makeWinContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata())
+cs, err := p.makeWinContainerStats(logger, criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata())
 if err != nil {
 return fmt.Errorf("make container stats: %w", err)

@@ -108,6 +107,7 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po
 }

 func (p *criStatsProvider) makeWinContainerStats(
+logger klog.Logger,
 stats *runtimeapi.WindowsContainerStats,
 container *runtimeapi.Container,
 rootFsInfo *cadvisorapiv2.FsInfo,
@@ -163,7 +163,7 @@ func (p *criStatsProvider) makeWinContainerStats(
 if fsID != nil {
 imageFsInfo, found := fsIDtoInfo[fsID.Mountpoint]
 if !found {
-imageFsInfo, err = p.getFsInfo(fsID)
+imageFsInfo, err = p.getFsInfo(logger, fsID)
 if err != nil {
 return nil, fmt.Errorf("get filesystem info: %w", err)
 }
@@ -183,7 +183,7 @@ func (p *criStatsProvider) makeWinContainerStats(
 // officially support in-place upgrade anyway.
 result.Logs, err = p.hostStatsProvider.getPodContainerLogStats(meta.GetNamespace(), meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName(), rootFsInfo)
 if err != nil {
-klog.ErrorS(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName())
+logger.Error(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName())
 }
 return result, nil
 }
@@ -32,6 +32,7 @@ import (
 kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
 "k8s.io/kubernetes/pkg/volume"
+"k8s.io/kubernetes/test/utils/ktesting"
 testingclock "k8s.io/utils/clock/testing"
 "k8s.io/utils/ptr"
 )
@@ -423,6 +424,7 @@ func Test_criStatsProvider_listContainerNetworkStats(t *testing.T) {
 skipped: true,
 },
 }
+logger, _ := ktesting.NewTestContext(t)
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 // TODO: Remove skip once https://github.com/kubernetes/kubernetes/issues/116692 is fixed.
@@ -435,7 +437,7 @@ func Test_criStatsProvider_listContainerNetworkStats(t *testing.T) {
 },
 clock: fakeClock,
 }
-got, err := p.listContainerNetworkStats()
+got, err := p.listContainerNetworkStats(logger)
 if (err != nil) != tt.wantErr {
 t.Errorf("listContainerNetworkStats() error = %v, wantErr %v", err, tt.wantErr)
 return
@@ -522,7 +524,8 @@ func Test_criStatsProvider_makeWinContainerStats(t *testing.T) {
 Uid: "sb0-uid",
 }

-got, err := p.makeWinContainerStats(inputStats, inputContainer, inputRootFsInfo, make(map[string]*cadvisorapiv2.FsInfo), inputPodSandboxMetadata)
+logger, _ := ktesting.NewTestContext(t)
+got, err := p.makeWinContainerStats(logger, inputStats, inputContainer, inputRootFsInfo, make(map[string]*cadvisorapiv2.FsInfo), inputPodSandboxMetadata)

 expected := &statsapi.ContainerStats{
 Name: "c0",
@@ -91,7 +91,7 @@ func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsa

 // cadvisorInfoToContainerStats returns the statsapi.ContainerStats converted
 // from the container and filesystem info.
-func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats {
+func cadvisorInfoToContainerStats(logger klog.Logger, name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats {
 result := &statsapi.ContainerStats{
 StartTime: metav1.NewTime(info.Spec.CreationTime),
 Name: name,
@@ -151,7 +151,7 @@ func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo
 })
 }

-result.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(info)
+result.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, info)

 return result
 }
@@ -247,7 +247,7 @@ func cadvisorInfoToNetworkStats(info *cadvisorapiv2.ContainerInfo) *statsapi.Net

 // cadvisorInfoToUserDefinedMetrics returns the statsapi.UserDefinedMetric
 // converted from the container info from cadvisor.
-func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []statsapi.UserDefinedMetric {
+func cadvisorInfoToUserDefinedMetrics(logger klog.Logger, info *cadvisorapiv2.ContainerInfo) []statsapi.UserDefinedMetric {
 type specVal struct {
 ref statsapi.UserDefinedMetricDescriptor
 valType cadvisorapiv1.DataType
@@ -269,7 +269,7 @@ func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats
 for name, values := range stat.CustomMetrics {
 specVal, ok := udmMap[name]
 if !ok {
-klog.InfoS("Spec for custom metric is missing from cAdvisor output", "metric", name, "spec", info.Spec, "metrics", stat.CustomMetrics)
+logger.Info("Spec for custom metric is missing from cAdvisor output", "metric", name, "spec", info.Spec, "metrics", stat.CustomMetrics)
 continue
 }
 for _, value := range values {
@@ -486,7 +486,7 @@ func addUsage(first, second *uint64) *uint64 {
 return &total
 }

-func makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo, resourceAnalyzer stats.ResourceAnalyzer, hostStatsProvider HostStatsProvider, isCRIStatsProvider bool) {
+func makePodStorageStats(logger klog.Logger, s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo, resourceAnalyzer stats.ResourceAnalyzer, hostStatsProvider HostStatsProvider, isCRIStatsProvider bool) {
 podNs := s.PodRef.Namespace
 podName := s.PodRef.Name
 podUID := types.UID(s.PodRef.UID)
@@ -495,11 +495,11 @@ func makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo,
 ephemeralStats = make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
 copy(ephemeralStats, vstats.EphemeralVolumes)
 s.VolumeStats = append(append([]statsapi.VolumeStats{}, vstats.EphemeralVolumes...), vstats.PersistentVolumes...)

 }

 logStats, err := hostStatsProvider.getPodLogStats(podNs, podName, podUID, rootFsInfo)
 if err != nil {
-klog.V(6).ErrorS(err, "Unable to fetch pod log stats", "pod", klog.KRef(podNs, podName))
+logger.V(6).Error(err, "Unable to fetch pod log stats", "pod", klog.KRef(podNs, podName))
 // If people do in-place upgrade, there might be pods still using
 // the old log path. For those pods, no pod log stats is returned.
 // We should continue generating other stats in that case.
@@ -507,7 +507,7 @@ func makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo,
 }
 etcHostsStats, err := hostStatsProvider.getPodEtcHostsStats(podUID, rootFsInfo)
 if err != nil {
-klog.V(6).ErrorS(err, "Unable to fetch pod etc hosts stats", "pod", klog.KRef(podNs, podName))
+logger.V(6).Error(err, "Unable to fetch pod etc hosts stats", "pod", klog.KRef(podNs, podName))
 }
 s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo, logStats, etcHostsStats, isCRIStatsProvider)
 }
@@ -29,6 +29,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+"k8s.io/kubernetes/test/utils/ktesting"
 "k8s.io/utils/ptr"
 )

@@ -81,7 +82,8 @@ func TestCustomMetrics(t *testing.T) {
 },
 },
 }
-assert.Contains(t, cadvisorInfoToUserDefinedMetrics(&cInfo),
+logger, _ := ktesting.NewTestContext(t)
+assert.Contains(t, cadvisorInfoToUserDefinedMetrics(logger, &cInfo),
 statsapi.UserDefinedMetric{
 UserDefinedMetricDescriptor: statsapi.UserDefinedMetricDescriptor{
 Name: "qos",
@@ -26,6 +26,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 internalapi "k8s.io/cri-api/pkg/apis"
+"k8s.io/klog/v2"
 statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
 "k8s.io/kubernetes/pkg/kubelet/cadvisor"
 "k8s.io/kubernetes/pkg/kubelet/cm"
@@ -121,8 +122,11 @@ func (p *Provider) GetCgroupStats(cgroupName string, updateStats bool) (*statsap
 }
 return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err)
 }
+// Use klog.TODO() because we currently do not have a proper logger to pass in.
+// Replace this with an appropriate logger when refactoring this function to accept a context parameter.
+logger := klog.TODO()
 // Rootfs and imagefs doesn't make sense for raw cgroup.
-s := cadvisorInfoToContainerStats(cgroupName, info, nil, nil)
+s := cadvisorInfoToContainerStats(logger, cgroupName, info, nil, nil)
 n := cadvisorInfoToNetworkStats(info)
 return s, n, nil
 }
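GetCgroupStats above has no context or logger to draw from yet, so it bridges the gap with klog.TODO(), an explicitly temporary logger analogous to context.TODO(). A minimal sketch of that bridging step (helper names are illustrative):

```go
package main

import "k8s.io/klog/v2"

// report stands in for a callee that has already been migrated to take a logger.
func report(logger klog.Logger, what string) {
	logger.Info("Reporting", "what", what)
}

// legacyEntryPoint has no context parameter yet, so it uses klog.TODO() until
// its own signature is refactored to accept one.
func legacyEntryPoint() {
	logger := klog.TODO() // placeholder; replace once a context/logger is plumbed in
	report(logger, "cgroup stats")
}

func main() {
	legacyEntryPoint()
}
```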
@@ -41,6 +41,7 @@ import (
 kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
 "k8s.io/kubernetes/pkg/volume"
+"k8s.io/kubernetes/test/utils/ktesting"
 "k8s.io/utils/ptr"
 )

@@ -172,7 +173,7 @@ func TestRootFsStats(t *testing.T) {
 }

 func TestHasDedicatedImageFs(t *testing.T) {
-ctx := context.Background()
+tCtx := ktesting.Init(t)
 imageStatsExpected := &statsapi.FsStats{AvailableBytes: ptr.To[uint64](1)}

 for desc, test := range map[string]struct {
@@ -215,7 +216,7 @@ func TestHasDedicatedImageFs(t *testing.T) {
 containerFs: test.containerFsStats,
 })

-dedicated, err := provider.HasDedicatedImageFs(ctx)
+dedicated, err := provider.HasDedicatedImageFs(tCtx)
 assert.NoError(t, err)
 assert.Equal(t, test.dedicated, dedicated)
 }