Merge pull request #21368 from timstclair/summary-api

Move stats summary types to a new kubelet/api package to avoid unnece…
Phillip Wittrock 2016-02-18 16:05:34 -08:00
commit c6fea28e83
6 changed files with 86 additions and 82 deletions
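The change is purely mechanical: the summary API types (Summary, NodeStats, PodStats, ContainerStats, VolumeStats, FsStats, and friends) move out of pkg/kubelet/server/stats into the new pkg/kubelet/api/v1alpha1/stats package, and every reference is qualified accordingly. As a rough sketch of what a consumer looks like after the move — the helper below is hypothetical, but the import path, type names, and fields are exactly the ones that appear in the diffs that follow:

package example

import (
	// New home of the kubelet summary API types introduced by this PR.
	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
)

// podVolumeNames is a hypothetical helper; it only illustrates how callers
// now reference the relocated types (stats.Summary, stats.PodStats, ...).
func podVolumeNames(summary *stats.Summary) []string {
	var names []string
	for _, pod := range summary.Pods {
		for _, vol := range pod.VolumeStats {
			names = append(names, vol.Name)
		}
	}
	return names
}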

View File

@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/types"
@@ -36,7 +37,7 @@ type Cache map[types.UID]*PodVolumeStats
 
 // PodVolumeStats encapsulates all VolumeStats for a pod
 type PodVolumeStats struct {
-	Volumes []VolumeStats
+	Volumes []stats.VolumeStats
 }
 
 // fsResourceAnalyzerInterface is for embedding fs functions into ResourceAnalyzer
@@ -107,7 +108,7 @@ func (s *fsResourceAnalyzer) getPodVolumeStats(pod *api.Pod) (PodVolumeStats, bo
 	}
 
 	// Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats
-	stats := make([]VolumeStats, 0, len(volumes))
+	stats := make([]stats.VolumeStats, 0, len(volumes))
 	for name, v := range volumes {
 		metric, err := v.GetMetrics()
 		if err != nil {
@@ -123,13 +124,13 @@ func (s *fsResourceAnalyzer) getPodVolumeStats(pod *api.Pod) (PodVolumeStats, bo
 	return PodVolumeStats{Volumes: stats}, true
 }
 
-func (s *fsResourceAnalyzer) parsePodVolumeStats(podName string, metric *volume.Metrics) VolumeStats {
+func (s *fsResourceAnalyzer) parsePodVolumeStats(podName string, metric *volume.Metrics) stats.VolumeStats {
 	available := uint64(metric.Available.Value())
 	capacity := uint64(metric.Capacity.Value())
 	used := uint64((metric.Used.Value()))
-	return VolumeStats{
+	return stats.VolumeStats{
 		Name: podName,
-		FsStats: FsStats{
+		FsStats: stats.FsStats{
 			AvailableBytes: &available,
 			CapacityBytes:  &capacity,
 			UsedBytes:      &used}}
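
The stats package itself is not part of this view, but the fields used above pin down the shape of its volume types. A minimal sketch, assuming only what this file exercises (the real definitions in pkg/kubelet/api/v1alpha1/stats may carry additional fields, struct tags, and doc comments):

package stats

// VolumeStats reports filesystem usage for a single volume; the Name field
// is set from the podName argument in parsePodVolumeStats above.
type VolumeStats struct {
	Name string
	// FsStats is embedded, so the byte counters set in this diff attach
	// directly to the volume entry.
	FsStats
}

// FsStats holds filesystem byte counters. Pointer fields let a missing
// value be told apart from an explicit zero (an assumption consistent
// with how the diff takes the address of local uint64 variables).
type FsStats struct {
	AvailableBytes *uint64
	CapacityBytes  *uint64
	UsedBytes      *uint64
}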

View File

@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
@@ -32,34 +33,34 @@ import (
 
 // TestGetPodVolumeStats tests that GetPodVolumeStats reads from the cache and returns the value
 func TestGetPodVolumeStats(t *testing.T) {
 	instance := newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5)
-	stats, found := instance.GetPodVolumeStats("testpod1")
+	vStats, found := instance.GetPodVolumeStats("testpod1")
 	assert.False(t, found)
-	assert.Equal(t, PodVolumeStats{}, stats)
+	assert.Equal(t, PodVolumeStats{}, vStats)
 
 	instance.cachedVolumeStats.Store(make(Cache))
-	stats, found = instance.GetPodVolumeStats("testpod1")
+	vStats, found = instance.GetPodVolumeStats("testpod1")
 	assert.False(t, found)
-	assert.Equal(t, PodVolumeStats{}, stats)
+	assert.Equal(t, PodVolumeStats{}, vStats)
 
 	available := uint64(100)
 	used := uint64(200)
 	capacity := uint64(400)
-	vs1 := VolumeStats{
+	vs1 := stats.VolumeStats{
 		Name: "vol1",
-		FsStats: FsStats{
+		FsStats: stats.FsStats{
 			AvailableBytes: &available,
 			UsedBytes:      &used,
 			CapacityBytes:  &capacity,
 		},
 	}
 	pvs := &PodVolumeStats{
-		Volumes: []VolumeStats{vs1},
+		Volumes: []stats.VolumeStats{vs1},
 	}
 	instance.cachedVolumeStats.Load().(Cache)["testpod1"] = pvs
-	stats, found = instance.GetPodVolumeStats("testpod1")
+	vStats, found = instance.GetPodVolumeStats("testpod1")
 	assert.True(t, found)
-	assert.Equal(t, *pvs, stats)
+	assert.Equal(t, *pvs, vStats)
 }
 
 // TestUpdateCachedPodVolumeStats tests that the cache is updated from the stats provider
@@ -120,9 +121,9 @@ func TestUpdateCachedPodVolumeStats(t *testing.T) {
 	v1available := uint64(100)
 	v1used := uint64(200)
 	v1capacity := uint64(400)
-	assert.Contains(t, actual1.Volumes, VolumeStats{
+	assert.Contains(t, actual1.Volumes, stats.VolumeStats{
 		Name: "v1",
-		FsStats: FsStats{
+		FsStats: stats.FsStats{
 			AvailableBytes: &v1available,
 			UsedBytes:      &v1used,
 			CapacityBytes:  &v1capacity,
@@ -132,9 +133,9 @@ func TestUpdateCachedPodVolumeStats(t *testing.T) {
 	v2available := uint64(600)
 	v2used := uint64(700)
 	v2capacity := uint64(1400)
-	assert.Contains(t, actual1.Volumes, VolumeStats{
+	assert.Contains(t, actual1.Volumes, stats.VolumeStats{
 		Name: "v2",
-		FsStats: FsStats{
+		FsStats: stats.FsStats{
 			AvailableBytes: &v2available,
 			UsedBytes:      &v2used,
 			CapacityBytes:  &v2capacity,
@@ -147,9 +148,9 @@ func TestUpdateCachedPodVolumeStats(t *testing.T) {
 	actual2, found := instance.GetPodVolumeStats("testpod2")
 	assert.True(t, found)
 	assert.Len(t, actual2.Volumes, 1)
-	assert.Contains(t, actual2.Volumes, VolumeStats{
+	assert.Contains(t, actual2.Volumes, stats.VolumeStats{
 		Name: "v3",
-		FsStats: FsStats{
+		FsStats: stats.FsStats{
 			AvailableBytes: &v3available,
 			UsedBytes:      &v3used,
 			CapacityBytes:  &v3capacity,

View File

@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/pkg/kubelet/dockertools"
 	"k8s.io/kubernetes/pkg/kubelet/leaky"
@@ -35,7 +36,7 @@ import (
 type SummaryProvider interface {
 	// Get provides a new Summary using the latest results from cadvisor
-	Get() (*Summary, error)
+	Get() (*stats.Summary, error)
 }
 
 type summaryProviderImpl struct {
@@ -54,7 +55,7 @@ func NewSummaryProvider(statsProvider StatsProvider, resourceAnalyzer ResourceAn
 // Get implements the SummaryProvider interface
 // Query cadvisor for the latest resource metrics and build into a summary
-func (sp *summaryProviderImpl) Get() (*Summary, error) {
+func (sp *summaryProviderImpl) Get() (*stats.Summary, error) {
 	options := cadvisorapiv2.RequestOptions{
 		IdType: cadvisorapiv2.TypeName,
 		Count:  2, // 2 samples are needed to compute "instantaneous" CPU
@@ -95,19 +96,19 @@ type summaryBuilder struct {
 }
 
 // build returns a Summary from aggregating the input data
-func (sb *summaryBuilder) build() (*Summary, error) {
+func (sb *summaryBuilder) build() (*stats.Summary, error) {
 	rootInfo, found := sb.infos["/"]
 	if !found {
 		return nil, fmt.Errorf("Missing stats for root container")
 	}
 
 	rootStats := sb.containerInfoV2ToStats("", &rootInfo)
-	nodeStats := NodeStats{
+	nodeStats := stats.NodeStats{
 		NodeName: sb.node.Name,
 		CPU:      rootStats.CPU,
 		Memory:   rootStats.Memory,
 		Network:  sb.containerInfoV2ToNetworkStats(&rootInfo),
-		Fs: &FsStats{
+		Fs: &stats.FsStats{
 			AvailableBytes: &sb.rootFsInfo.Available,
 			CapacityBytes:  &sb.rootFsInfo.Capacity,
 			UsedBytes:      &sb.rootFsInfo.Usage},
@@ -115,9 +116,9 @@ func (sb *summaryBuilder) build() (*Summary, error) {
 	}
 
 	systemContainers := map[string]string{
-		SystemContainerKubelet: sb.nodeConfig.KubeletCgroupsName,
-		SystemContainerRuntime: sb.nodeConfig.RuntimeCgroupsName,
-		SystemContainerMisc:    sb.nodeConfig.SystemCgroupsName,
+		stats.SystemContainerKubelet: sb.nodeConfig.KubeletCgroupsName,
+		stats.SystemContainerRuntime: sb.nodeConfig.RuntimeCgroupsName,
+		stats.SystemContainerMisc:    sb.nodeConfig.SystemCgroupsName,
 	}
 	for sys, name := range systemContainers {
 		if info, ok := sb.infos[name]; ok {
@@ -125,7 +126,7 @@ func (sb *summaryBuilder) build() (*Summary, error) {
 		}
 	}
 
-	summary := Summary{
+	summary := stats.Summary{
 		Node: nodeStats,
 		Pods: sb.buildSummaryPods(),
 	}
@@ -135,16 +136,16 @@ func (sb *summaryBuilder) build() (*Summary, error) {
 // containerInfoV2FsStats populates the container fs stats
 func (sb *summaryBuilder) containerInfoV2FsStats(
 	info *cadvisorapiv2.ContainerInfo,
-	cs *ContainerStats) {
+	cs *stats.ContainerStats) {
 
 	// The container logs live on the node rootfs device
-	cs.Logs = &FsStats{
+	cs.Logs = &stats.FsStats{
 		AvailableBytes: &sb.rootFsInfo.Available,
 		CapacityBytes:  &sb.rootFsInfo.Capacity,
 	}
 
 	// The container rootFs lives on the imageFs devices (which may not be the node root fs)
-	cs.Rootfs = &FsStats{
+	cs.Rootfs = &stats.FsStats{
 		AvailableBytes: &sb.imageFsInfo.Available,
 		CapacityBytes:  &sb.imageFsInfo.Capacity,
 	}
@@ -179,9 +180,9 @@ func (sb *summaryBuilder) latestContainerStats(info *cadvisorapiv2.ContainerInfo
 
 // buildSummaryPods aggregates and returns the container stats in cinfos by the Pod managing the container.
 // Containers not managed by a Pod are omitted.
-func (sb *summaryBuilder) buildSummaryPods() []PodStats {
+func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats {
 	// Map each container to a pod and update the PodStats with container data
-	podToStats := map[PodReference]*PodStats{}
+	podToStats := map[stats.PodReference]*stats.PodStats{}
 	for _, cinfo := range sb.infos {
 		// Build the Pod key if this container is managed by a Pod
 		if !sb.isPodManagedContainer(&cinfo) {
@@ -190,42 +191,42 @@ func (sb *summaryBuilder) buildSummaryPods() []PodStats {
 		ref := sb.buildPodRef(&cinfo)
 
 		// Lookup the PodStats for the pod using the PodRef.  If none exists, initialize a new entry.
-		stats, found := podToStats[ref]
+		podStats, found := podToStats[ref]
 		if !found {
-			stats = &PodStats{PodRef: ref}
-			podToStats[ref] = stats
+			podStats = &stats.PodStats{PodRef: ref}
+			podToStats[ref] = podStats
 		}
 
 		// Update the PodStats entry with the stats from the container by adding it to stats.Containers
 		containerName := dockertools.GetContainerName(cinfo.Spec.Labels)
 		if containerName == leaky.PodInfraContainerName {
 			// Special case for infrastructure container which is hidden from the user and has network stats
-			stats.Network = sb.containerInfoV2ToNetworkStats(&cinfo)
-			stats.StartTime = unversioned.NewTime(cinfo.Spec.CreationTime)
+			podStats.Network = sb.containerInfoV2ToNetworkStats(&cinfo)
+			podStats.StartTime = unversioned.NewTime(cinfo.Spec.CreationTime)
 		} else {
-			stats.Containers = append(stats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
+			podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
 		}
 	}
 
 	// Add each PodStats to the result
-	result := make([]PodStats, 0, len(podToStats))
-	for _, stats := range podToStats {
+	result := make([]stats.PodStats, 0, len(podToStats))
+	for _, podStats := range podToStats {
 		// Lookup the volume stats for each pod
-		podUID := types.UID(stats.PodRef.UID)
+		podUID := types.UID(podStats.PodRef.UID)
 		if vstats, found := sb.resourceAnalyzer.GetPodVolumeStats(podUID); found {
-			stats.VolumeStats = vstats.Volumes
+			podStats.VolumeStats = vstats.Volumes
 		}
-		result = append(result, *stats)
+		result = append(result, *podStats)
 	}
 	return result
 }
 
 // buildPodRef returns a PodReference that identifies the Pod managing cinfo
-func (sb *summaryBuilder) buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) PodReference {
+func (sb *summaryBuilder) buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) stats.PodReference {
 	podName := dockertools.GetPodName(cinfo.Spec.Labels)
 	podNamespace := dockertools.GetPodNamespace(cinfo.Spec.Labels)
 	podUID := dockertools.GetPodUID(cinfo.Spec.Labels)
-	return PodReference{Name: podName, Namespace: podNamespace, UID: podUID}
+	return stats.PodReference{Name: podName, Namespace: podNamespace, UID: podUID}
 }
 
 // isPodManagedContainer returns true if the cinfo container is managed by a Pod
@@ -243,17 +244,17 @@ func (sb *summaryBuilder) isPodManagedContainer(cinfo *cadvisorapiv2.ContainerIn
 func (sb *summaryBuilder) containerInfoV2ToStats(
 	name string,
-	info *cadvisorapiv2.ContainerInfo) ContainerStats {
-	stats := ContainerStats{
+	info *cadvisorapiv2.ContainerInfo) stats.ContainerStats {
+	cStats := stats.ContainerStats{
 		StartTime: unversioned.NewTime(info.Spec.CreationTime),
 		Name:      name,
 	}
 	cstat, found := sb.latestContainerStats(info)
 	if !found {
-		return stats
+		return cStats
 	}
 	if info.Spec.HasCpu {
-		cpuStats := CPUStats{
+		cpuStats := stats.CPUStats{
 			Time: unversioned.NewTime(cstat.Timestamp),
 		}
 		if cstat.CpuInst != nil {
@@ -262,12 +263,12 @@ func (sb *summaryBuilder) containerInfoV2ToStats(
 		if cstat.Cpu != nil {
 			cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
 		}
-		stats.CPU = &cpuStats
+		cStats.CPU = &cpuStats
 	}
 	if info.Spec.HasMemory {
 		pageFaults := cstat.Memory.ContainerData.Pgfault
 		majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
-		stats.Memory = &MemoryStats{
+		cStats.Memory = &stats.MemoryStats{
 			Time:            unversioned.NewTime(cstat.Timestamp),
 			UsageBytes:      &cstat.Memory.Usage,
 			WorkingSetBytes: &cstat.Memory.WorkingSet,
@@ -275,12 +276,12 @@ func (sb *summaryBuilder) containerInfoV2ToStats(
 			MajorPageFaults: &majorPageFaults,
 		}
 	}
-	sb.containerInfoV2FsStats(info, &stats)
-	stats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
-	return stats
+	sb.containerInfoV2FsStats(info, &cStats)
+	cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
+	return cStats
 }
 
-func (sb *summaryBuilder) containerInfoV2ToNetworkStats(info *cadvisorapiv2.ContainerInfo) *NetworkStats {
+func (sb *summaryBuilder) containerInfoV2ToNetworkStats(info *cadvisorapiv2.ContainerInfo) *stats.NetworkStats {
 	if !info.Spec.HasNetwork {
 		return nil
 	}
@@ -301,7 +302,7 @@ func (sb *summaryBuilder) containerInfoV2ToNetworkStats(info *cadvisorapiv2.Cont
 		txBytes += inter.TxBytes
 		txErrors += inter.TxErrors
 	}
-	return &NetworkStats{
+	return &stats.NetworkStats{
 		Time:     unversioned.NewTime(cstat.Timestamp),
 		RxBytes:  &rxBytes,
 		RxErrors: &rxErrors,
@@ -310,9 +311,9 @@ func (sb *summaryBuilder) containerInfoV2ToNetworkStats(info *cadvisorapiv2.Cont
 	}
 }
 
-func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []UserDefinedMetric {
+func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats.UserDefinedMetric {
 	type specVal struct {
-		ref     UserDefinedMetricDescriptor
+		ref     stats.UserDefinedMetricDescriptor
 		valType cadvisorapiv1.DataType
 		time    time.Time
 		value   float64
@@ -320,9 +321,9 @@ func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv
 	udmMap := map[string]*specVal{}
 	for _, spec := range info.Spec.CustomMetrics {
 		udmMap[spec.Name] = &specVal{
-			ref: UserDefinedMetricDescriptor{
+			ref: stats.UserDefinedMetricDescriptor{
 				Name:  spec.Name,
-				Type:  UserDefinedMetricType(spec.Type),
+				Type:  stats.UserDefinedMetricType(spec.Type),
 				Units: spec.Units,
 			},
 			valType: spec.Format,
@@ -348,9 +349,9 @@ func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv
 			}
 		}
 	}
-	var udm []UserDefinedMetric
+	var udm []stats.UserDefinedMetric
 	for _, specVal := range udmMap {
-		udm = append(udm, UserDefinedMetric{
+		udm = append(udm, stats.UserDefinedMetric{
 			UserDefinedMetricDescriptor: specVal.ref,
 			Time:  unversioned.NewTime(specVal.time),
 			Value: specVal.value,
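
One detail from the hunks above: the system container keys (SystemContainerKubelet, SystemContainerRuntime, SystemContainerMisc) also move into the stats package and are used as map keys when building nodeStats.SystemContainers. Their concrete string values are not visible in this diff; the sketch below only shows how such constants are conventionally declared, and the literal values are assumptions:

package stats

// Names for the node's built-in system containers, referenced above as
// stats.SystemContainer*. The string values here are placeholders; the
// authoritative definitions live in pkg/kubelet/api/v1alpha1/stats.
const (
	SystemContainerKubelet = "kubelet"
	SystemContainerRuntime = "runtime"
	SystemContainerMisc    = "misc"
)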

View File

@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	kubestats "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/pkg/kubelet/leaky"
 )
@@ -87,9 +88,9 @@ func TestBuildSummary(t *testing.T) {
 		cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
 	)
 
-	prf0 := PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
-	prf1 := PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
-	prf2 := PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
+	prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
+	prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
+	prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
 	infos := map[string]v2.ContainerInfo{
 		"/":              summaryTestContainerInfo(seedRoot, "", "", ""),
 		"/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
@@ -123,9 +124,9 @@ func TestBuildSummary(t *testing.T) {
 	checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)
 
 	systemSeeds := map[string]int{
-		SystemContainerRuntime: seedRuntime,
-		SystemContainerKubelet: seedKubelet,
-		SystemContainerMisc:    seedMisc,
+		kubestats.SystemContainerRuntime: seedRuntime,
+		kubestats.SystemContainerKubelet: seedKubelet,
+		kubestats.SystemContainerMisc:    seedMisc,
 	}
 	for _, sys := range nodeStats.SystemContainers {
 		name := sys.Name
@@ -139,7 +140,7 @@ func TestBuildSummary(t *testing.T) {
 	}
 
 	assert.Equal(t, 3, len(summary.Pods))
-	indexPods := make(map[PodReference]PodStats, len(summary.Pods))
+	indexPods := make(map[kubestats.PodReference]kubestats.PodStats, len(summary.Pods))
 	for _, pod := range summary.Pods {
 		indexPods[pod.PodRef] = pod
 	}
@@ -148,7 +149,7 @@ func TestBuildSummary(t *testing.T) {
 	ps, found := indexPods[prf0]
 	assert.True(t, found)
 	assert.Len(t, ps.Containers, 2)
-	indexCon := make(map[string]ContainerStats, len(ps.Containers))
+	indexCon := make(map[string]kubestats.ContainerStats, len(ps.Containers))
 	for _, con := range ps.Containers {
 		indexCon[con.Name] = con
 	}
@@ -284,7 +285,7 @@ func testTime(base time.Time, seed int) time.Time {
 	return base.Add(time.Duration(seed) * time.Second)
 }
 
-func checkNetworkStats(t *testing.T, label string, seed int, stats *NetworkStats) {
+func checkNetworkStats(t *testing.T, label string, seed int, stats *kubestats.NetworkStats) {
 	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Net.Time")
 	assert.EqualValues(t, seed+offsetNetRxBytes, *stats.RxBytes, label+".Net.RxBytes")
 	assert.EqualValues(t, seed+offsetNetRxErrors, *stats.RxErrors, label+".Net.RxErrors")
@@ -292,13 +293,13 @@ func checkNetworkStats(t *testing.T, label string, seed int, stats *NetworkStats
 	assert.EqualValues(t, seed+offsetNetTxErrors, *stats.TxErrors, label+".Net.TxErrors")
 }
 
-func checkCPUStats(t *testing.T, label string, seed int, stats *CPUStats) {
+func checkCPUStats(t *testing.T, label string, seed int, stats *kubestats.CPUStats) {
 	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".CPU.Time")
 	assert.EqualValues(t, seed+offsetCPUUsageCores, *stats.UsageNanoCores, label+".CPU.UsageCores")
 	assert.EqualValues(t, seed+offsetCPUUsageCoreSeconds, *stats.UsageCoreNanoSeconds, label+".CPU.UsageCoreSeconds")
 }
 
-func checkMemoryStats(t *testing.T, label string, seed int, stats *MemoryStats) {
+func checkMemoryStats(t *testing.T, label string, seed int, stats *kubestats.MemoryStats) {
 	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Mem.Time")
 	assert.EqualValues(t, seed+offsetMemUsageBytes, *stats.UsageBytes, label+".Mem.UsageBytes")
 	assert.EqualValues(t, seed+offsetMemWorkingSetBytes, *stats.WorkingSetBytes, label+".Mem.WorkingSetBytes")
@@ -357,19 +358,19 @@ func TestCustomMetrics(t *testing.T) {
 	}
 	sb := &summaryBuilder{}
 	assert.Contains(t, sb.containerInfoV2ToUserDefinedMetrics(&cInfo),
-		UserDefinedMetric{
-			UserDefinedMetricDescriptor: UserDefinedMetricDescriptor{
+		kubestats.UserDefinedMetric{
+			UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{
 				Name:  "qos",
-				Type:  MetricGauge,
+				Type:  kubestats.MetricGauge,
 				Units: "per second",
 			},
 			Time:  unversioned.NewTime(timestamp2),
 			Value: 100,
 		},
-		UserDefinedMetric{
-			UserDefinedMetricDescriptor: UserDefinedMetricDescriptor{
+		kubestats.UserDefinedMetric{
+			UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{
 				Name:  "cpuLoad",
-				Type:  MetricCumulative,
+				Type:  kubestats.MetricCumulative,
 				Units: "count",
 			},
 			Time: unversioned.NewTime(timestamp2),

View File

@@ -27,7 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/kubelet/server/stats"
+	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 
 	"github.com/davecgh/go-spew/spew"
 	. "github.com/onsi/ginkgo"