Fixes dns autoscaling test flakes

Zihong Zheng
2016-11-10 11:14:54 -08:00
parent 545f749a0d
commit b77484987b


@@ -23,6 +23,7 @@ import (
"strings"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
@@ -35,8 +36,6 @@ import (
 const (
 	DNSdefaultTimeout = 5 * time.Minute
-	DNSscaleUpTimeout = 5 * time.Minute
-	DNSscaleDownTimeout = 10 * time.Minute
 	DNSNamespace = "kube-system"
 	ClusterAddonLabelKey = "k8s-app"
 	KubeDNSLabelName = "kube-dns"
@@ -46,33 +45,31 @@ const (
 var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
 	f := framework.NewDefaultFramework("dns-autoscaling")
 	var c clientset.Interface
-	var nodeCount int
 	var previousParams map[string]string
-	DNSParams_1 := map[string]string{"linear": "{\"nodesPerReplica\": 1}"}
-	DNSParams_2 := map[string]string{"linear": "{\"nodesPerReplica\": 2}"}
-	DNSParams_3 := map[string]string{"linear": "{\"nodesPerReplica\": 3}"}
+	DNSParams_1 := DNSParamsLinear{map[string]string{"linear": "{\"nodesPerReplica\": 1}"}, 1.0, 0.0}
+	DNSParams_2 := DNSParamsLinear{map[string]string{"linear": "{\"nodesPerReplica\": 2}"}, 2.0, 0.0}
+	DNSParams_3 := DNSParamsLinear{map[string]string{"linear": "{\"nodesPerReplica\": 3, \"coresPerReplica\": 3}"}, 3.0, 3.0}

 	BeforeEach(func() {
 		framework.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
-		nodes := framework.GetReadySchedulableNodesOrDie(c)
-		nodeCount = len(nodes.Items)
-		Expect(nodeCount).NotTo(BeZero())
+		Expect(len(framework.GetReadySchedulableNodesOrDie(c).Items)).NotTo(BeZero())

 		pcm, err := fetchDNSScalingConfigMap(c)
-		ExpectNoError(err)
+		Expect(err).NotTo(HaveOccurred())
 		previousParams = pcm.Data

 		By("Replace the dns autoscaling parameters with testing parameters")
-		ExpectNoError(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_1)))
+		Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_1.data))).NotTo(HaveOccurred())

 		By("Wait for kube-dns scaled to expected number")
-		ExpectNoError(waitForDNSReplicasSatisfied(c, nodeCount, DNSdefaultTimeout))
+		getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
+		Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
 	})
 	AfterEach(func() {
 		By("Restoring initial dns autoscaling parameters")
-		ExpectNoError(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams)))
+		Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
 	})

 	// This test is separated because it is slow and needs to run serially.
@@ -81,12 +78,11 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
 		sum := 0
 		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 			size, err := GroupSize(mig)
-			ExpectNoError(err)
+			Expect(err).NotTo(HaveOccurred())
 			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
 			originalSizes[mig] = size
 			sum += size
 		}
-		Expect(nodeCount).Should(Equal(sum))

 		By("Manually increase cluster size")
 		increasedSize := 0
@@ -96,54 +92,105 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
 			increasedSize += increasedSizes[key]
 		}
 		setMigSizes(increasedSizes)
-		ExpectNoError(WaitForClusterSizeFunc(c,
-			func(size int) bool { return size == increasedSize }, DNSscaleUpTimeout))
+		Expect(WaitForClusterSizeFunc(c,
+			func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())

 		By("Wait for kube-dns scaled to expected number")
-		ExpectNoError(waitForDNSReplicasSatisfied(c, increasedSize, DNSdefaultTimeout))
+		getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
+		Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())

+		By("Replace the dns autoscaling parameters with another set of testing parameters")
+		Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_3.data))).NotTo(HaveOccurred())
+		By("Wait for kube-dns scaled to expected number")
+		getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
+		Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())

 		By("Restoring cluster size")
 		setMigSizes(originalSizes)
-		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, DNSscaleDownTimeout))
+		Expect(framework.WaitForClusterSize(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())

 		By("Wait for kube-dns scaled to expected number")
-		ExpectNoError(waitForDNSReplicasSatisfied(c, nodeCount, DNSdefaultTimeout))
+		Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
 	})
It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
By("--- Scenario: should scale kube-dns based on changed parameters ---")
By("Replace the dns autoscaling parameters with the second testing parameters")
ExpectNoError(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_2)))
By("Replace the dns autoscaling parameters with another testing parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_3.data))).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
ExpectNoError(waitForDNSReplicasSatisfied(c, int(math.Ceil(float64(nodeCount)/2.0)), DNSdefaultTimeout))
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_3)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
By("Delete the ConfigMap for autoscaler")
err := deleteDNSScalingConfigMap(c)
ExpectNoError(err)
Expect(err).NotTo(HaveOccurred())
By("Wait for the ConfigMap got re-created")
configMap, err := waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
ExpectNoError(err)
Expect(err).NotTo(HaveOccurred())
By("Check the new created ConfigMap got the same data as we have")
Expect(reflect.DeepEqual(previousParams, configMap.Data)).To(Equal(true))
By("Replace the dns autoscaling parameters with the second testing parameters")
ExpectNoError(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_2)))
By("Replace the dns autoscaling parameters with another testing parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_2.data))).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
ExpectNoError(waitForDNSReplicasSatisfied(c, int(math.Ceil(float64(nodeCount)/2.0)), DNSdefaultTimeout))
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should recover after autoscaler pod got deleted ---")
By("Delete the autoscaler pod for kube-dns")
ExpectNoError(deleteDNSAutoscalerPod(c))
Expect(deleteDNSAutoscalerPod(c)).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with the third testing parameters")
ExpectNoError(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_3)))
By("Replace the dns autoscaling parameters with another testing parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(DNSParams_1.data))).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
ExpectNoError(waitForDNSReplicasSatisfied(c, int(math.Ceil(float64(nodeCount)/3.0)), DNSdefaultTimeout))
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})
})
+type DNSParamsLinear struct {
+	data            map[string]string
+	nodesPerReplica float64
+	coresPerReplica float64
+}
+
+type getExpectReplicasFunc func(c clientset.Interface) int
+
+func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
+	return func(c clientset.Interface) int {
+		var replicasFromNodes float64
+		var replicasFromCores float64
+		nodes := framework.GetReadySchedulableNodesOrDie(c).Items
+		if params.nodesPerReplica > 0 {
+			replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
+		}
+		if params.coresPerReplica > 0 {
+			replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
+		}
+		return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
+	}
+}
+
+func getScheduableCores(nodes []v1.Node) int64 {
+	var sc resource.Quantity
+	for _, node := range nodes {
+		if !node.Spec.Unschedulable {
+			sc.Add(node.Status.Capacity[v1.ResourceCPU])
+		}
+	}
+	scInt64, scOk := sc.AsInt64()
+	if !scOk {
+		framework.Logf("unable to compute integer values of schedulable cores in the cluster")
+		return 0
+	}
+	return scInt64
+}

 func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
 	cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName)
 	if err != nil {
@@ -207,23 +254,27 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
 	return nil
 }

-func waitForDNSReplicasSatisfied(c clientset.Interface, expected int, timeout time.Duration) (err error) {
+func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
 	var current int
-	framework.Logf("Waiting up to %v for kube-dns reach %v replicas", timeout, expected)
+	var expected int
+	framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
 	condition := func() (bool, error) {
 		current, err = getDNSReplicas(c)
 		if err != nil {
 			return false, err
 		}
+		expected = getExpected(c)
 		if current != expected {
 			framework.Logf("replicas not as expected: got %v, expected %v", current, expected)
 			return false, nil
 		}
 		return true, nil
 	}

-	if err = wait.Poll(time.Second, timeout, condition); err != nil {
+	if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
 		return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
 	}
 	framework.Logf("kube-dns reaches expected replicas: %v", expected)
 	return nil
 }
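
The core of this fix is that the test now derives the expected kube-dns replica count from the live cluster through getExpectReplicasFuncLinear, instead of comparing against a nodeCount captured once in BeforeEach. The standalone sketch below is not part of the commit; it simply mirrors the linear formula used by that helper, max(1, max(ceil(nodes/nodesPerReplica), ceil(cores/coresPerReplica))), and evaluates it for the three DNSParams sets under an assumed cluster shape of 4 schedulable nodes with 2 cores each (the node and core counts are illustrative assumptions only).

// replica_math_sketch.go — illustrative only, not part of the commit.
package main

import (
	"fmt"
	"math"
)

// expectedReplicas mirrors the arithmetic in getExpectReplicasFuncLinear:
// a parameter of 0 means "not configured", and the result is never below 1.
func expectedReplicas(nodes, cores int, nodesPerReplica, coresPerReplica float64) int {
	var fromNodes, fromCores float64
	if nodesPerReplica > 0 {
		fromNodes = math.Ceil(float64(nodes) / nodesPerReplica)
	}
	if coresPerReplica > 0 {
		fromCores = math.Ceil(float64(cores) / coresPerReplica)
	}
	return int(math.Max(1.0, math.Max(fromNodes, fromCores)))
}

func main() {
	nodes, cores := 4, 8 // assumed cluster shape: 4 nodes, 2 cores each

	// DNSParams_1: nodesPerReplica=1 -> ceil(4/1) = 4 replicas
	fmt.Println(expectedReplicas(nodes, cores, 1.0, 0.0))
	// DNSParams_2: nodesPerReplica=2 -> ceil(4/2) = 2 replicas
	fmt.Println(expectedReplicas(nodes, cores, 2.0, 0.0))
	// DNSParams_3: nodesPerReplica=3, coresPerReplica=3
	//   -> max(ceil(4/3), ceil(8/3)) = max(2, 3) = 3 replicas
	fmt.Println(expectedReplicas(nodes, cores, 3.0, 3.0))
}

Re-evaluating expected = getExpected(c) inside the poll condition is what removes the flake: if the cluster is resized while the test is waiting, the expected replica count tracks the current node and core counts rather than a stale snapshot.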