diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base
index e190fadbbf1..b6774713e01 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.base
+++ b/cluster/addons/dns/coredns/coredns.yaml.base
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 1000Mi
+            memory: 170Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in
index 355dac35c9e..7beb3769c3d 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.in
+++ b/cluster/addons/dns/coredns/coredns.yaml.in
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 1000Mi
+            memory: 170Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed
index eefecf83055..4f0dc10a10e 100644
--- a/cluster/addons/dns/coredns/coredns.yaml.sed
+++ b/cluster/addons/dns/coredns/coredns.yaml.sed
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 1000Mi
+            memory: 170Mi
           requests:
             cpu: 100m
             memory: 70Mi
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index ebb9b9d23c1..867fc1c86e0 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -259,7 +259,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
 DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index b2c6f980461..4766afa5701 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -266,7 +266,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="10.0.0.10"
 DNS_DOMAIN="cluster.local"
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 519c6f97ef6..0b8f64cf9d8 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -251,18 +251,9 @@ func (f *Framework) BeforeEach() {
 
 	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
 		var err error
-		var nodeMode NodesSet
-		switch TestContext.GatherKubeSystemResourceUsageData {
-		case "master":
-			nodeMode = MasterNodes
-		case "masteranddns":
-			nodeMode = MasterAndDNSNodes
-		default:
-			nodeMode = AllNodes
-		}
 		f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
-			InKubemark: ProviderIs("kubemark"),
-			Nodes:      nodeMode,
+			InKubemark:                  ProviderIs("kubemark"),
+			MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
 			ResourceDataGatheringPeriod: 60 * time.Second,
 			ProbeDuration:               15 * time.Second,
 			PrintVerboseLogs:            false,
diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go
index d5711e6210e..36797add0bc 100644
--- a/test/e2e/framework/resource_usage_gatherer.go
+++ b/test/e2e/framework/resource_usage_gatherer.go
@@ -202,20 +202,12 @@ type ContainerResourceGatherer struct {
 
 type ResourceGathererOptions struct {
 	InKubemark bool
-	Nodes      NodesSet
+	MasterOnly bool
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
 	PrintVerboseLogs            bool
 }
 
-type NodesSet int
-
-const (
-	AllNodes          NodesSet = 0 // All containers on all nodes
-	MasterNodes       NodesSet = 1 // All containers on Master nodes only
-	MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
-)
-
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
 	g := ContainerResourceGatherer{
 		client:       c,
@@ -245,23 +237,13 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			return nil, err
 		}
 	}
-	dnsNodes := make(map[string]bool)
 	for _, pod := range pods.Items {
-		if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
-			continue
-		}
-		if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
-			continue
-		}
 		for _, container := range pod.Status.InitContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
 		for _, container := range pod.Status.ContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
-		if options.Nodes == MasterAndDNSNodes {
-			dnsNodes[pod.Spec.NodeName] = true
-		}
 	}
 	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
@@ -270,7 +252,7 @@
 	}
 
 	for _, node := range nodeList.Items {
-		if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
+		if !options.MasterOnly || system.IsMasterNode(node.Name) {
 			g.workerWg.Add(1)
 			g.workers = append(g.workers, resourceGatherWorker{
 				c:                            c,
@@ -284,7 +266,7 @@
 				probeDuration:                options.ProbeDuration,
 				printVerboseLogs:             options.PrintVerboseLogs,
 			})
-			if options.Nodes == MasterNodes {
+			if options.MasterOnly {
 				break
 			}
 		}
diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
index 6b215cbfe1e..4c01a633af6 100644
--- a/test/e2e/scheduling/nvidia-gpus.go
+++ b/test/e2e/scheduling/nvidia-gpus.go
@@ -137,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	var rsgather *framework.ContainerResourceGatherer
 	if setupResourceGatherer {
 		framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 		framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 		go rsgather.StartGatheringData()
 	}