Merge pull request #121422 from borg-land/fix-dns-autoscaling-test

Fix dns autoscaling test to run with coredns
This commit is contained in:
Kubernetes Prow Robot authored 2023-12-13 21:26:36 +01:00, committed by GitHub
commit a4cee65e16


@@ -38,12 +38,15 @@ import (
"github.com/onsi/ginkgo/v2"
)
// This test requires coredns to be installed on the cluster with autoscaling enabled.
// Compare your coredns manifest against the output of the command below
// helm template coredns -n kube-system coredns/coredns --set k8sAppLabelOverride=kube-dns --set fullnameOverride=coredns --set autoscaler.enabled=true
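// For illustration only (shape assumed from cluster-proportional-autoscaler's
// linear mode, which both autoscaler deployments use): the ConfigMap that chart
// renders looks roughly like
//
//	apiVersion: v1
//	kind: ConfigMap
//	metadata:
//	  name: coredns-autoscaler
//	  namespace: kube-system
//	data:
//	  linear: '{"coresPerReplica":256,"nodesPerReplica":16,"min":1,"preventSinglePointFailure":true}'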
// Constants used in dns-autoscaling test.
const (
DNSdefaultTimeout = 5 * time.Minute
ClusterAddonLabelKey = "k8s-app"
DNSLabelName = "kube-dns"
DNSAutoscalerLabelName = "kube-dns-autoscaler"
DNSdefaultTimeout = 5 * time.Minute
ClusterAddonLabelKey = "k8s-app"
DNSLabelName = "kube-dns"
)
var _ = SIGDescribe("DNS horizontal autoscaling", func() {
@@ -51,6 +54,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
var c clientset.Interface
var previousParams map[string]string
var configMapNames map[string]string
var originDNSReplicasCount int
var DNSParams1 DNSParamsLinear
var DNSParams2 DNSParamsLinear
@@ -65,10 +69,21 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
nodeCount := len(nodes.Items)
ginkgo.By("Collecting original replicas count and DNS scaling params")
originDNSReplicasCount, err = getDNSReplicas(ctx, c)
// Check if we are running coredns or kube-dns, the only difference is the name of the autoscaling CM.
// The test should behave identically on both dns providers
provider, err := detectDNSProvider(ctx, c)
framework.ExpectNoError(err)
pcm, err := fetchDNSScalingConfigMap(ctx, c)
originDNSReplicasCount, err = getDNSReplicas(ctx, c)
framework.ExpectNoError(err)
configMapNames = map[string]string{
"kube-dns": "kube-dns-autoscaler",
"coredns": "coredns-autoscaler",
}
pcm, err := fetchDNSScalingConfigMap(ctx, c, configMapNames[provider])
framework.Logf("original DNS scaling params: %v", pcm)
framework.ExpectNoError(err)
previousParams = pcm.Data
@@ -109,12 +124,19 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
numNodes, err := e2enode.TotalRegistered(ctx, c)
framework.ExpectNoError(err)
configMapNames = map[string]string{
"kube-dns": "kube-dns-autoscaler",
"coredns": "coredns-autoscaler",
}
provider, err := detectDNSProvider(ctx, c)
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams1)))
framework.ExpectNoError(err)
defer func() {
ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], previousParams))
framework.ExpectNoError(err)
ginkgo.By("Wait for number of running and ready kube-dns pods recover")
@@ -151,7 +173,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams3)))
framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number")
@@ -171,12 +193,21 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) {
configMapNames = map[string]string{
"kube-dns": "kube-dns-autoscaler",
"coredns": "coredns-autoscaler",
}
provider, err := detectDNSProvider(ctx, c)
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
cm := packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams1))
framework.Logf("Updating the following cm: %v", cm)
err = updateDNSScalingConfigMap(ctx, c, cm)
framework.ExpectNoError(err)
defer func() {
ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], previousParams))
framework.ExpectNoError(err)
}()
ginkgo.By("Wait for kube-dns scaled to expected number")
@@ -186,7 +217,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---")
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams3)))
framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams3)
@@ -195,30 +226,30 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
ginkgo.By("Delete the ConfigMap for autoscaler")
err = deleteDNSScalingConfigMap(ctx, c)
err = deleteDNSScalingConfigMap(ctx, c, configMapNames[provider])
framework.ExpectNoError(err)
ginkgo.By("Wait for the ConfigMap got re-created")
_, err = waitForDNSConfigMapCreated(ctx, c, DNSdefaultTimeout)
_, err = waitForDNSConfigMapCreated(ctx, c, DNSdefaultTimeout, configMapNames[provider])
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams2)))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams2)))
framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number")
ginkgo.By("Wait for kube-dns/coredns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams2)
err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)
ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---")
ginkgo.By("Delete the autoscaler pod for kube-dns")
ginkgo.By("Delete the autoscaler pod for kube-dns/coredns")
err = deleteDNSAutoscalerPod(ctx, c)
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(configMapNames[provider], packLinearParams(&DNSParams1)))
framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number")
ginkgo.By("Wait for kube-dns/coredns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams1)
err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)
@@ -261,16 +292,30 @@ func getSchedulableCores(nodes []v1.Node) int64 {
return sc.Value()
}
func fetchDNSScalingConfigMap(ctx context.Context, c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, DNSAutoscalerLabelName, metav1.GetOptions{})
func detectDNSProvider(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, "coredns-autoscaler", metav1.GetOptions{})
if cm != nil && err == nil {
return "coredns", nil
}
cm, err = c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, "kube-dns-autoscaler", metav1.GetOptions{})
if cm != nil && err == nil {
return "kube-dns", nil
}
return "", fmt.Errorf("the cluster doesn't have kube-dns or coredns autoscaling configured")
}
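// Usage sketch (hypothetical helper, not part of this change): resolve the
// provider once, then map it to the matching autoscaler ConfigMap name,
// mirroring the configMapNames lookup used in the tests above.
func autoscalerConfigMapName(ctx context.Context, c clientset.Interface) (string, error) {
	provider, err := detectDNSProvider(ctx, c)
	if err != nil {
		return "", err
	}
	names := map[string]string{
		"kube-dns": "kube-dns-autoscaler",
		"coredns":  "coredns-autoscaler",
	}
	return names[provider], nil
}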
func fetchDNSScalingConfigMap(ctx context.Context, c clientset.Interface, configMapName string) (*v1.ConfigMap, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return cm, nil
}
func deleteDNSScalingConfigMap(ctx context.Context, c clientset.Interface) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil {
func deleteDNSScalingConfigMap(ctx context.Context, c clientset.Interface, configMapName string) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, configMapName, metav1.DeleteOptions{}); err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap deleted.")
@@ -287,9 +332,9 @@ func packLinearParams(params *DNSParamsLinear) map[string]string {
return paramsMap
}
func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
func packDNSScalingConfigMap(configMapName string, params map[string]string) *v1.ConfigMap {
configMap := v1.ConfigMap{}
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
configMap.ObjectMeta.Name = configMapName
configMap.ObjectMeta.Namespace = metav1.NamespaceSystem
configMap.Data = params
return &configMap
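// Example (sketch; the DNSParamsLinear field name is assumed from the
// packLinearParams helper above):
//
//	cm := packDNSScalingConfigMap("coredns-autoscaler",
//		packLinearParams(&DNSParamsLinear{nodesPerReplica: 1}))
//	err := updateDNSScalingConfigMap(ctx, c, cm)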
@@ -320,8 +365,8 @@ func getDNSReplicas(ctx context.Context, c clientset.Interface) (int, error) {
}
func deleteDNSAutoscalerPod(ctx context.Context, c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
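// A set-based "in" selector matches a pod labeled with either autoscaler
// name, so the same helper works on both kube-dns and coredns clusters.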
selector, _ := labels.Parse(fmt.Sprintf("%s in (kube-dns-autoscaler, coredns-autoscaler)", ClusterAddonLabelKey))
listOpts := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, listOpts)
if err != nil {
return err
@@ -342,7 +387,7 @@ func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, get
var current int
var expected int
framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
condition := func() (bool, error) {
condition := func(ctx context.Context) (bool, error) {
current, err = getDNSReplicas(ctx, c)
if err != nil {
return false, err
@@ -355,24 +400,24 @@ return true, nil
return true, nil
}
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
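// immediate=false: the first condition check runs only after one interval
// has elapsed; the condition also receives the polling context now.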
if err = wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false, condition); err != nil {
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %w", expected, current, err)
}
framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
}
func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(ctx, c)
func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, timeout time.Duration, configMapName string) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap to be re-created", timeout)
condition := func(ctx context.Context) (bool, error) {
configMap, err = fetchDNSScalingConfigMap(ctx, c, configMapName)
if err != nil {
return false, nil
}
return true, nil
}
if err = wait.Poll(time.Second, timeout, condition); err != nil {
if err = wait.PollUntilContextTimeout(ctx, time.Second, timeout, false, condition); err != nil {
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap to be re-created: %w", err)
}
return configMap, nil