Merge pull request #28850 from MHBauer/faster-test

Automatic merge from submit-queue

Faster test

While attempting to troubleshoot flakes in this test case, I actually wanted to understand how it worked.
There are some poor comments that need work.
I added some additional output which may or may not help in debugging the flakes.
I doubt this fixes the flake.

My major concern is the 'refactor' I did of the test case to batch up runs by sub-test-case. As it stood, there was a 200ms pause between each sub-case, so they should not have interfered with each other. Now they are started as fast as possible, but only 20 run at a time before moving on to the next 20. I am not sure whether I am violating the ethos of the original test case.
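
For reference, the concurrency pattern the batching moves to looks roughly like the sketch below: each attempt adds all of its sub-cases to a `sync.WaitGroup` up front, launches them concurrently, and waits for the whole batch before starting the next attempt. `runCase` and the case list here are illustrative placeholders, not the real test helpers.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// runCase stands in for a single proxy sub-case (a doProxy call in the real test).
func runCase(attempt int, path string) {
	time.Sleep(10 * time.Millisecond) // pretend to hit the proxy
	fmt.Printf("attempt %d: %s ok\n", attempt, path)
}

func main() {
	cases := []string{"/svc/portname1/", "/svc/80/", "/pod/portname2/"} // placeholder paths
	attempts := 3

	var wg sync.WaitGroup
	for i := 0; i < attempts; i++ {
		wg.Add(len(cases)) // register the whole batch up front
		for _, path := range cases {
			go func(i int, path string) {
				defer wg.Done()
				runCase(i, path)
			}(i, path)
		}
		wg.Wait() // finish this batch before moving on to the next attempt
	}
}
```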

Runs on my computer are down from 2m40s to 40s.
Getting rid of the arbitrary client-side rate limiting brings it down to ~12 seconds: ~11 to fetch the image and <1 to actually run the tests against the proxies. I can add a zero to the number of loops if you want to hit it harder, but that would produce 10x as much text output.
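
To make the arithmetic behind those numbers concrete: the old loop slept 200ms before launching each sub-case (the removed `time.Sleep(200 * time.Millisecond)` in the diff below), so launch pacing alone put a floor on the runtime regardless of how fast the proxies responded. The counts in this sketch are illustrative placeholders, not the exact values from the test.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative counts only; the real values come from len(expectations)
	// and proxyAttempts in the test.
	cases := 20
	attempts := 20
	total := cases * attempts

	// Old behaviour: one launch every 200ms to stay under the default client QPS.
	perLaunch := 200 * time.Millisecond
	fmt.Println("pacing floor:", time.Duration(total)*perLaunch) // 400 launches -> 1m20s minimum

	// New behaviour: ClientQPS: -1.0 removes the arbitrary client-side limiting
	// (per the PR), so total time is bounded by image pull and proxy latency
	// instead of launch pacing.
	fmt.Println("unthrottled: bounded by setup and proxy latency")
}
```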


k8s-merge-robot committed on 2016-07-26 23:38:09 -07:00 (committed by GitHub)


@@ -53,7 +53,10 @@ const (
 )
 func proxyContext(version string) {
-	f := framework.NewDefaultFramework("proxy")
+	options := framework.FrameworkOptions{
+		ClientQPS: -1.0,
+	}
+	f := framework.NewFramework("proxy", options, nil)
 	prefix := "/api/" + version
 	// Port here has to be kept in sync with default kubelet port.
@@ -65,7 +68,10 @@ func proxyContext(version string) {
 	It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
 	It("should proxy to cadvisor using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })
+	// using the porter image to serve content, access the content
+	// (of multiple pods?) from multiple (endpoints/services?)
 	It("should proxy through a service and a pod [Conformance]", func() {
+		start := time.Now()
 		labels := map[string]string{"proxy-service-target": "true"}
 		service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
 			ObjectMeta: api.ObjectMeta{
@@ -105,7 +111,10 @@ func proxyContext(version string) {
 			}
 		}(service.Name)
-		// Make an RC with a single pod.
+		// Make an RC with a single pod. The 'porter' image is
+		// a simple server which serves the values of the
+		// environmental variables below.
 		By("starting an echo server on multiple ports")
 		pods := []*api.Pod{}
 		cfg := framework.RCConfig{
 			Client: f.Client,
@@ -149,6 +158,7 @@ func proxyContext(version string) {
 		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
+		// table constructors
 		// Try proxying through the service and directly to through the pod.
 		svcProxyURL := func(scheme, port string) string {
 			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
@@ -162,6 +172,8 @@ func proxyContext(version string) {
 		subresourcePodProxyURL := func(scheme, port string) string {
 			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
 		}
+		// construct the table
 		expectations := map[string]string{
 			svcProxyURL("", "portname1") + "/": "foo",
 			svcProxyURL("", "80") + "/":        "foo",
@@ -218,15 +230,24 @@ func proxyContext(version string) {
 			defer errLock.Unlock()
 			errs = append(errs, s)
 		}
+		d := time.Since(start)
+		framework.Logf("setup took %v, starting test cases", d)
+		numberTestCases := len(expectations)
+		totalAttempts := numberTestCases * proxyAttempts
+		By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))
 		for i := 0; i < proxyAttempts; i++ {
+			wg.Add(numberTestCases)
 			for path, val := range expectations {
-				wg.Add(1)
 				go func(i int, path, val string) {
 					defer wg.Done()
-					body, status, d, err := doProxy(f, path)
+					// this runs the test case
+					body, status, d, err := doProxy(f, path, i)
 					if err != nil {
 						if serr, ok := err.(*errors.StatusError); ok {
-							recordError(fmt.Sprintf("%v: path %v gave status error: %+v", i, path, serr.Status()))
+							recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v",
+								i, status, d, path, serr.Status()))
 						} else {
 							recordError(fmt.Sprintf("%v: path %v gave error: %v", i, path, err))
 						}
@@ -242,11 +263,9 @@ func proxyContext(version string) {
 						recordError(fmt.Sprintf("%v: path %v took %v > %v", i, path, d, proxyHTTPCallTimeout))
 					}
 				}(i, path, val)
-				// default QPS is 5
-				time.Sleep(200 * time.Millisecond)
 			}
+			wg.Wait()
 		}
-		wg.Wait()
 		if len(errs) != 0 {
 			body, err := f.Client.Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw()
@@ -261,7 +280,7 @@ func proxyContext(version string) {
 	})
 }
-func doProxy(f *framework.Framework, path string) (body []byte, statusCode int, d time.Duration, err error) {
+func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCode int, d time.Duration, err error) {
 	// About all of the proxy accesses in this file:
 	// * AbsPath is used because it preserves the trailing '/'.
 	// * Do().Raw() is used (instead of DoRaw()) because it will turn an
@@ -272,7 +291,7 @@ func doProxy(f *framework.Framework, path string) (body []byte, statusCode int,
 	body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
 	d = time.Since(start)
 	if len(body) > 0 {
-		framework.Logf("%v: %s (%v; %v)", path, truncate(body, maxDisplayBodyLen), statusCode, d)
+		framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
 	} else {
 		framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
 	}
@@ -304,7 +323,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
 	// not reaching Kubelet issue is debugged.
 	serviceUnavailableErrors := 0
 	for i := 0; i < proxyAttempts; i++ {
-		_, status, d, err := doProxy(f, prefix+node+nodeDest)
+		_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
 		if status == http.StatusServiceUnavailable {
 			framework.Logf("Failed proxying node logs due to service unavailable: %v", err)
 			time.Sleep(time.Second)