Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Merge pull request #68683 from chrisohaver/trackdns

Add DNS pod resource monitoring option

Commit ab02fd6f8a
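
In summary, as reflected in the diff below: the e2e resource-usage gatherer replaces its boolean MasterOnly option with a NodesSet mode (AllNodes, MasterNodes, MasterAndDNSNodes), so a new "masteranddns" setting can track DNS pod resource usage on non-master nodes; the cluster setup scripts now default to CoreDNS instead of kube-dns; and the CoreDNS manifests raise the container memory limit from 170Mi to 1000Mi.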
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
@@ -112,7 +112,7 @@ spec:
         imagePullPolicy: IfNotPresent
         resources:
           limits:
-            memory: 170Mi
+            memory: 1000Mi
           requests:
             cpu: 100m
             memory: 70Mi
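
The same one-line bump from 170Mi to 1000Mi is applied to each of the three CoreDNS manifest variants above; the cpu and memory requests stay at 100m and 70Mi. Presumably the higher limit gives CoreDNS headroom on larger clusters, where its memory use grows with the number of services and endpoints it watches.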
@@ -259,7 +259,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
 DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
@@ -266,7 +266,7 @@ fi
 
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
 DNS_SERVER_IP="10.0.0.10"
 DNS_DOMAIN="cluster.local"
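
Both scripts read the existing environment first (the ${CLUSTER_DNS_CORE_DNS:-true} default-expansion form), so this flip only changes the default: exporting CLUSTER_DNS_CORE_DNS=false before running a script should still select kube-dns.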
@@ -251,9 +251,18 @@ func (f *Framework) BeforeEach() {
 	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
 		var err error
+		var nodeMode NodesSet
+		switch TestContext.GatherKubeSystemResourceUsageData {
+		case "master":
+			nodeMode = MasterNodes
+		case "masteranddns":
+			nodeMode = MasterAndDNSNodes
+		default:
+			nodeMode = AllNodes
+		}
 		f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
-			InKubemark: ProviderIs("kubemark"),
-			MasterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
+			InKubemark:                  ProviderIs("kubemark"),
+			Nodes:                       nodeMode,
 			ResourceDataGatheringPeriod: 60 * time.Second,
 			ProbeDuration:               15 * time.Second,
 			PrintVerboseLogs:            false,
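
As a reading aid for the hunk above, here is a minimal self-contained sketch of the new mode selection; the NodesSet names come from the diff, while nodesSetFor is a hypothetical helper written only for illustration:

package main

import "fmt"

// NodesSet mirrors the enum this PR adds to the e2e framework
// (see the resource_usage_gatherer.go hunk below).
type NodesSet int

const (
	AllNodes          NodesSet = iota // all containers on all nodes
	MasterNodes                       // all containers on master nodes only
	MasterAndDNSNodes                 // master containers plus DNS containers elsewhere
)

// nodesSetFor reproduces the switch from BeforeEach: "master" and
// "masteranddns" select restricted modes, and any other truthy value
// falls back to gathering from all nodes.
func nodesSetFor(gatherOption string) NodesSet {
	switch gatherOption {
	case "master":
		return MasterNodes
	case "masteranddns":
		return MasterAndDNSNodes
	default:
		return AllNodes
	}
}

func main() {
	for _, opt := range []string{"master", "masteranddns", "true"} {
		fmt.Printf("%q -> %d\n", opt, nodesSetFor(opt))
	}
}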
@@ -202,12 +202,20 @@ type ContainerResourceGatherer struct {
 
 type ResourceGathererOptions struct {
 	InKubemark                  bool
-	MasterOnly                  bool
+	Nodes                       NodesSet
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
 	PrintVerboseLogs            bool
 }
 
+type NodesSet int
+
+const (
+	AllNodes          NodesSet = 0 // All containers on all nodes
+	MasterNodes       NodesSet = 1 // All containers on Master nodes only
+	MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
+)
+
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
 	g := ContainerResourceGatherer{
 		client: c,
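
To make the migration concrete, the following standalone sketch mirrors the types above (local copies, not the real framework package) and shows a caller choosing the new mode where it previously would have set MasterOnly:

package main

import (
	"fmt"
	"time"
)

// Local stand-ins for the framework types shown above, so the snippet
// compiles on its own.
type NodesSet int

const (
	AllNodes NodesSet = iota
	MasterNodes
	MasterAndDNSNodes
)

type ResourceGathererOptions struct {
	InKubemark                  bool
	Nodes                       NodesSet
	ResourceDataGatheringPeriod time.Duration
	ProbeDuration               time.Duration
	PrintVerboseLogs            bool
}

func main() {
	// Old style: ResourceGathererOptions{MasterOnly: true}.
	// New style: pick the node set explicitly; MasterAndDNSNodes is the
	// mode this PR adds for tracking DNS pods on non-master nodes.
	opts := ResourceGathererOptions{
		Nodes:                       MasterAndDNSNodes,
		ResourceDataGatheringPeriod: 60 * time.Second,
		ProbeDuration:               15 * time.Second,
	}
	fmt.Printf("%+v\n", opts)
}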
@@ -237,13 +245,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			return nil, err
 		}
 	}
+	dnsNodes := make(map[string]bool)
 	for _, pod := range pods.Items {
+		if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
+			continue
+		}
+		if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
+			continue
+		}
 		for _, container := range pod.Status.InitContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
 		for _, container := range pod.Status.ContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
 		}
+		if options.Nodes == MasterAndDNSNodes {
+			dnsNodes[pod.Spec.NodeName] = true
+		}
 	}
 	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
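
The new pod loop decides, in MasterAndDNSNodes mode, which pods to track: anything on a master node, plus pods labeled k8s-app=kube-dns anywhere else. A minimal sketch of that predicate, with keepPod and isMaster as illustrative stand-ins (isMaster plays the role of system.IsMasterNode):

package main

import "fmt"

// keepPod reproduces the MasterAndDNSNodes filter from the hunk above:
// a pod is tracked when it runs on a master node or carries the
// k8s-app=kube-dns label.
func keepPod(isMaster bool, labels map[string]string) bool {
	return isMaster || labels["k8s-app"] == "kube-dns"
}

func main() {
	fmt.Println(keepPod(false, map[string]string{"k8s-app": "kube-dns"})) // true: DNS pod on a worker
	fmt.Println(keepPod(false, map[string]string{"k8s-app": "nginx"}))    // false: skipped
	fmt.Println(keepPod(true, nil))                                       // true: master pod
}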
@@ -252,7 +270,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	}
 
 	for _, node := range nodeList.Items {
-		if !options.MasterOnly || system.IsMasterNode(node.Name) {
+		if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
 			g.workerWg.Add(1)
 			g.workers = append(g.workers, resourceGatherWorker{
 				c: c,
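
The dnsNodes map built while scanning pods feeds the per-node test above. A self-contained sketch of the predicate, with shouldGather as an illustrative stand-in:

package main

import "fmt"

type NodesSet int

const (
	AllNodes NodesSet = iota
	MasterNodes
	MasterAndDNSNodes
)

// shouldGather reproduces the per-node test from the loop above: every
// node qualifies in AllNodes mode; otherwise only master nodes and
// nodes recorded in dnsNodes get a gather worker.
func shouldGather(mode NodesSet, isMaster bool, dnsNodes map[string]bool, name string) bool {
	return mode == AllNodes || isMaster || dnsNodes[name]
}

func main() {
	dns := map[string]bool{"worker-2": true} // built while scanning kube-dns pods
	fmt.Println(shouldGather(MasterAndDNSNodes, false, dns, "worker-2")) // true: hosts kube-dns
	fmt.Println(shouldGather(MasterAndDNSNodes, false, dns, "worker-3")) // false: skipped
	fmt.Println(shouldGather(AllNodes, false, dns, "worker-3"))          // true
}

The next hunk completes the picture: the early break now fires only in MasterNodes mode, which preserves the old MasterOnly behavior of starting a single master worker and stopping.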
@@ -266,7 +284,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 				probeDuration:    options.ProbeDuration,
 				printVerboseLogs: options.PrintVerboseLogs,
 			})
-			if options.MasterOnly {
+			if options.Nodes == MasterNodes {
 				break
 			}
 		}
@@ -137,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 	var rsgather *framework.ContainerResourceGatherer
 	if setupResourceGatherer {
 		framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+		rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 		framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 		go rsgather.StartGatheringData()
 	}
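
For the NVIDIA GPU device-plugin test, the update is mechanical: MasterOnly: false becomes Nodes: framework.AllNodes, preserving the old behavior of gathering from every node running the DaemonSet pods.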