Improve LB session affinity tests

wojtekt 2020-06-23 14:12:04 +02:00
parent ad634590f8
commit 3c34b56fbf


@@ -101,42 +101,81 @@ var (
 // portsByPodName is a map that maps pod name to container ports.
 type portsByPodName map[string][]int
 
+// affinityCheckFromPod returns interval, timeout and function pinging the service and
+// returning pinged hosts for pinging the service from execPod.
+func affinityCheckFromPod(execPod *v1.Pod, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) {
+	timeout := AffinityTimeout
+	// interval considering a maximum of 2 seconds per connection
+	interval := 2 * AffinityConfirmCount * time.Second
+	serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
+	curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
+	cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl)
+	getHosts := func() []string {
+		stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+		if err != nil {
+			framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
+			return nil
+		}
+		return strings.Split(stdout, "\n")
+	}
+	return interval, timeout, getHosts
+}
+
+// affinityCheckFromTest returns interval, timeout and function pinging the service and
+// returning pinged hosts for pinging the service from the test itself.
+func affinityCheckFromTest(cs clientset.Interface, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) {
+	interval := 2 * time.Second
+	timeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
+	params := &e2enetwork.HTTPPokeParams{Timeout: 2 * time.Second}
+	getHosts := func() []string {
+		var hosts []string
+		for i := 0; i < AffinityConfirmCount; i++ {
+			result := e2enetwork.PokeHTTP(serviceIP, servicePort, "", params)
+			if result.Status == e2enetwork.HTTPSuccess {
+				hosts = append(hosts, string(result.Body))
+			}
+		}
+		return hosts
+	}
+	return interval, timeout, getHosts
+}
+
 // CheckAffinity function tests whether the service affinity works as expected.
 // If affinity is expected, the test will return true once affinityConfirmCount
 // number of same response observed in a row. If affinity is not expected, the
 // test will keep observe until different responses observed. The function will
 // return false only in case of unexpected errors.
 func checkAffinity(cs clientset.Interface, execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool {
-	serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
-	curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
-	cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl)
-	timeout := AffinityTimeout
-	if execPod == nil {
-		timeout = e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
+	var interval, timeout time.Duration
+	var getHosts func() []string
+	if execPod != nil {
+		interval, timeout, getHosts = affinityCheckFromPod(execPod, serviceIP, servicePort)
+	} else {
+		interval, timeout, getHosts = affinityCheckFromTest(cs, serviceIP, servicePort)
 	}
 	var tracker affinityTracker
-	// interval considering a maximum of 2 seconds per connection
-	interval := 2 * AffinityConfirmCount * time.Second
 	if pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		if execPod != nil {
-			stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
-			if err != nil {
-				framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
-				return false, nil
-			}
-			hosts := strings.Split(stdout, "\n")
-			for _, host := range hosts {
+		hosts := getHosts()
+		for _, host := range hosts {
+			if len(host) > 0 {
 				tracker.recordHost(strings.TrimSpace(host))
 			}
-		} else {
-			rawResponse := GetHTTPContent(serviceIP, servicePort, timeout, "")
-			tracker.recordHost(rawResponse.String())
 		}
 		trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
+		if !trackerFulfilled {
+			return false, nil
+		}
 		if !shouldHold && !affinityHolds {
 			return true, nil
 		}
-		if shouldHold && trackerFulfilled && affinityHolds {
+		if shouldHold && affinityHolds {
 			return true, nil
 		}
 		return false, nil
@@ -147,7 +186,7 @@ func checkAffinity(cs clientset.Interface, execPod *v1.Pod, serviceIP string, se
 			return false
 		}
 		if !trackerFulfilled {
-			checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", serviceIPPort))
+			checkAffinityFailed(tracker, fmt.Sprintf("Connection timed out or not enough responses."))
 		}
 		if shouldHold {
 			checkAffinityFailed(tracker, "Affinity should hold but didn't.")
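
The shape of this refactor is: the probing strategy (curl from an exec pod vs. direct HTTP pokes from the test binary) is packaged into a helper that returns a poll interval, a timeout and a getHosts closure, and checkAffinity only consumes that triple. The following is a minimal, self-contained Go sketch of that pattern outside the e2e framework; the names affinityCheckFromClient, confirmCount and affinityHolds, the placeholder address 203.0.113.10:80, and the 2-minute timeout are illustrative assumptions, not the framework's API, and the real affinity bookkeeping is done by the affinityTracker shown in the diff above.

// Minimal sketch (assumed names and values): a helper returns
// (interval, timeout, getHosts) and the caller polls getHosts until
// enough identical responses in a row are observed.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// confirmCount plays the role of AffinityConfirmCount: how many identical
// responses in a row are treated as "affinity holds". The value is illustrative.
const confirmCount = 15

// affinityCheckFromClient is a stand-in for affinityCheckFromTest: it returns a
// poll interval, an overall timeout, and a getHosts closure that fires a burst
// of HTTP requests and collects the response bodies (the backend hostnames).
func affinityCheckFromClient(url string) (time.Duration, time.Duration, func() []string) {
	interval := 2 * time.Second
	timeout := 2 * time.Minute // illustrative; the real test derives this from the cluster
	client := &http.Client{Timeout: 2 * time.Second}
	getHosts := func() []string {
		var hosts []string
		for i := 0; i < confirmCount; i++ {
			resp, err := client.Get(url)
			if err != nil {
				continue // skip failed probes; the poll loop retries the whole burst
			}
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if h := strings.TrimSpace(string(body)); h != "" {
				hosts = append(hosts, h)
			}
		}
		return hosts
	}
	return interval, timeout, getHosts
}

// affinityHolds reports whether the burst ends with confirmCount identical hosts,
// a simplified stand-in for the tracker.checkHostTrace logic in the diff.
func affinityHolds(trace []string) bool {
	if len(trace) < confirmCount {
		return false
	}
	tail := trace[len(trace)-confirmCount:]
	for _, h := range tail {
		if h != tail[0] {
			return false
		}
	}
	return true
}

func main() {
	// 203.0.113.10:80 is a placeholder load-balancer address for illustration.
	interval, timeout, getHosts := affinityCheckFromClient("http://203.0.113.10:80/")
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if affinityHolds(getHosts()) {
			fmt.Println("session affinity held")
			return
		}
		time.Sleep(interval)
	}
	fmt.Println("session affinity did not hold within", timeout)
}

Injecting getHosts keeps checkAffinity agnostic to how the service is probed, which is what lets the load-balancer path use e2enetwork.PokeHTTP with the load-balancer propagation timeout while the exec-pod path keeps the curl loop and the shorter AffinityTimeout.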