diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base
index b6774713e01..e190fadbbf1 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.base
+++ b/cluster/addons/dns/coredns/coredns.yaml.base
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in
index 7beb3769c3d..355dac35c9e 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.in
+++ b/cluster/addons/dns/coredns/coredns.yaml.in
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed
index 4f0dc10a10e..eefecf83055 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.sed
+++ b/cluster/addons/dns/coredns/coredns.yaml.sed
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 867fc1c86e0..ebb9b9d23c1 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -259,7 +259,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
 DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 4766afa5701..b2c6f980461 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -266,7 +266,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="10.0.0.10"
 DNS_DOMAIN="cluster.local"
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 0b8f64cf9d8..519c6f97ef6 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -251,9 +251,18 @@ func (f *Framework) BeforeEach() {
 	if TestContext.GatherKubeSystemResourceUsageData != "false" &&
 		TestContext.GatherKubeSystemResourceUsageData != "none" {
 		var err error
+		var nodeMode NodesSet
+		switch TestContext.GatherKubeSystemResourceUsageData {
+		case "master":
+			nodeMode = MasterNodes
+		case "masteranddns":
+			nodeMode = MasterAndDNSNodes
+		default:
+			nodeMode = AllNodes
+		}
 		f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
-			InKubemark:                  ProviderIs("kubemark"),
-			MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
+			InKubemark:                  ProviderIs("kubemark"),
+			Nodes:                       nodeMode,
 			ResourceDataGatheringPeriod: 60 * time.Second,
 			ProbeDuration:               15 * time.Second,
 			PrintVerboseLogs:            false,
diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go
index 36797add0bc..d5711e6210e 100644
--- a/test/e2e/framework/resource_usage_gatherer.go
+++ b/test/e2e/framework/resource_usage_gatherer.go
@@ -202,12 +202,20 @@ type ContainerResourceGatherer struct {
 
 type ResourceGathererOptions struct {
 	InKubemark                  bool
-	MasterOnly                  bool
+	Nodes                       NodesSet
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
 	PrintVerboseLogs            bool
 }
 
+type NodesSet int
+
+const (
+	AllNodes          NodesSet = 0 // All containers on all nodes
+	MasterNodes       NodesSet = 1 // All containers on Master nodes only
+	MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
+)
+
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
 	g := ContainerResourceGatherer{
 		client:                       c,
@@ -237,13 +245,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			return nil, err
 		}
 	}
+	dnsNodes := make(map[string]bool)
 	for _, pod := range pods.Items {
+		if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
+			continue
+		}
+		if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
+			continue
+		}
 		for _, container := range pod.Status.InitContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
 		for _, container := range pod.Status.ContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
+		if options.Nodes == MasterAndDNSNodes {
+			dnsNodes[pod.Spec.NodeName] = true
+		}
 	}
 	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
@@ -252,7 +270,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 		return nil, err
 	}
 	for _, node := range nodeList.Items {
-		if !options.MasterOnly || system.IsMasterNode(node.Name) {
+		if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
 			g.workerWg.Add(1)
 			g.workers = append(g.workers, resourceGatherWorker{
 				c:                            c,
@@ -266,7 +284,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 				probeDuration:    options.ProbeDuration,
 				printVerboseLogs: options.PrintVerboseLogs,
 			})
-			if options.MasterOnly {
+			if options.Nodes == MasterNodes {
 				break
 			}
 		}
diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
index 4c01a633af6..6b215cbfe1e 100644
--- a/test/e2e/scheduling/nvidia-gpus.go
+++ b/test/e2e/scheduling/nvidia-gpus.go
@@ -137,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	var rsgather *framework.ContainerResourceGatherer
 	if setupResourceGatherer {
 		framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 		framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 		go rsgather.StartGatheringData()
 	}
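
Note (not part of the diff): a minimal usage sketch of the new NodesSet option, assuming the test/e2e/framework package as modified above. The helper name startDNSUsageGatherer is illustrative; only NewResourceUsageGatherer, ResourceGathererOptions, StartGatheringData, and the NodesSet constants come from this change. In Framework.BeforeEach the same mode is selected by setting TestContext.GatherKubeSystemResourceUsageData to "masteranddns".

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// startDNSUsageGatherer is a hypothetical helper showing how a test could
// track master components plus DNS pods with the new Nodes option.
func startDNSUsageGatherer(f *framework.Framework) (*framework.ContainerResourceGatherer, error) {
	g, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{
		InKubemark: false,
		// MasterAndDNSNodes gathers all containers on master nodes, plus
		// containers of pods labeled k8s-app=kube-dns on the other nodes.
		Nodes:                       framework.MasterAndDNSNodes,
		ResourceDataGatheringPeriod: 60 * time.Second,
		ProbeDuration:               15 * time.Second,
		PrintVerboseLogs:            false,
	}, nil) // assumption: a nil pod list makes the gatherer list kube-system pods itself
	if err != nil {
		return nil, err
	}
	// Collection runs in the background until the test stops and summarizes it.
	go g.StartGatheringData()
	return g, nil
}

With MasterAndDNSNodes, the gatherer spawns workers only for master nodes and for nodes recorded in the dnsNodes map, so per-node polling cost stays low on large clusters while CoreDNS (now the GCE default, with the raised 1000Mi limit) is still measured.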