diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 0b8f64cf9d8..839eefa7a5d 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -251,9 +251,19 @@ func (f *Framework) BeforeEach() {
 		if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
 			var err error
+			var nodeMode NodesSet
+			switch TestContext.GatherKubeSystemResourceUsageData {
+			case "master":
+				nodeMode = MasterNodes
+			case "masteranddns":
+				nodeMode = MasterAndDNSNodes
+			default:
+				nodeMode = AllNodes
+			}
+
 			f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
-				InKubemark: ProviderIs("kubemark"),
-				MasterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
+				InKubemark:                  ProviderIs("kubemark"),
+				Nodes:                       nodeMode,
 				ResourceDataGatheringPeriod: 60 * time.Second,
 				ProbeDuration:               15 * time.Second,
 				PrintVerboseLogs:            false,
 			}, nil)
diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go
index 36797add0bc..d5711e6210e 100644
--- a/test/e2e/framework/resource_usage_gatherer.go
+++ b/test/e2e/framework/resource_usage_gatherer.go
@@ -202,12 +202,20 @@ type ContainerResourceGatherer struct {
 
 type ResourceGathererOptions struct {
 	InKubemark                  bool
-	MasterOnly                  bool
+	Nodes                       NodesSet
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
 	PrintVerboseLogs            bool
 }
 
+type NodesSet int
+
+const (
+	AllNodes          NodesSet = 0 // All containers on all nodes
+	MasterNodes       NodesSet = 1 // All containers on Master nodes only
+	MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
+)
+
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
 	g := ContainerResourceGatherer{
 		client: c,
@@ -237,13 +245,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			return nil, err
 		}
 	}
+	dnsNodes := make(map[string]bool)
 	for _, pod := range pods.Items {
+		if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
+			continue
+		}
+		if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
+			continue
+		}
 		for _, container := range pod.Status.InitContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
 		for _, container := range pod.Status.ContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
+		if options.Nodes == MasterAndDNSNodes {
+			dnsNodes[pod.Spec.NodeName] = true
+		}
 	}
 	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
@@ -252,7 +270,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	}
 
 	for _, node := range nodeList.Items {
-		if !options.MasterOnly || system.IsMasterNode(node.Name) {
+		if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
 			g.workerWg.Add(1)
 			g.workers = append(g.workers, resourceGatherWorker{
 				c: c,
@@ -266,7 +284,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 				probeDuration:    options.ProbeDuration,
 				printVerboseLogs: options.PrintVerboseLogs,
 			})
-			if options.MasterOnly {
+			if options.Nodes == MasterNodes {
 				break
 			}
 		}
diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
index 4bceb42b77f..8dc0a3873dd 100644
--- a/test/e2e/scheduling/nvidia-gpus.go
+++ b/test/e2e/scheduling/nvidia-gpus.go
@@ -137,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	var rsgather *framework.ContainerResourceGatherer
 	if setupResourceGatherer {
 		framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 		framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 		go rsgather.StartGatheringData()
 	}