diff --git a/test/integration/apiserver/apply/BUILD b/test/integration/apiserver/apply/BUILD
index 72b6c7d2830..293fce4642a 100644
--- a/test/integration/apiserver/apply/BUILD
+++ b/test/integration/apiserver/apply/BUILD
@@ -13,6 +13,7 @@ go_test(
     ],
     deps = [
         "//pkg/master:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -24,6 +25,7 @@ go_test(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//test/integration/framework:go_default_library",
+        "//vendor/sigs.k8s.io/yaml:go_default_library",
     ],
 )
diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go
index 803888c1f8a..b917bebabf5 100644
--- a/test/integration/apiserver/apply/apply_test.go
+++ b/test/integration/apiserver/apply/apply_test.go
@@ -18,12 +18,16 @@ package apiserver
 
 import (
 	"encoding/json"
+	"flag"
+	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"reflect"
+	"strings"
 	"testing"
 	"time"
 
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -31,15 +35,19 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	genericfeatures "k8s.io/apiserver/pkg/features"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/master"
 	"k8s.io/kubernetes/test/integration/framework"
+	"sigs.k8s.io/yaml"
 )
 
-func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
-	masterConfig := framework.NewIntegrationTestMasterConfig()
+func setup(t testing.TB, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {
+	opts := framework.MasterConfigOptions{EtcdOptions: framework.DefaultEtcdOptions()}
+	opts.EtcdOptions.DefaultStorageMediaType = "application/vnd.kubernetes.protobuf"
+	masterConfig := framework.NewIntegrationTestMasterConfigWithOptions(&opts)
 	if len(groupVersions) > 0 {
 		resourceConfig := master.DefaultAPIResourceConfigSource()
 		resourceConfig.EnableVersions(groupVersions...)
@@ -48,7 +56,7 @@ func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server
 	masterConfig.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig()
 	_, s, closeFn := framework.RunAMaster(masterConfig)
 
-	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
+	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL, QPS: -1})
 	if err != nil {
 		t.Fatalf("Error in create clientset: %v", err)
 	}
@@ -1043,3 +1051,344 @@ func TestClearManagedFieldsWithUpdate(t *testing.T) {
 		t.Fatalf("Expected other fields to stay untouched, got: %v", object)
 	}
 }
+
+var podBytes = []byte(`
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: "2019-07-08T09:31:18Z"
+  labels:
+    app: some-app
+    plugin1: some-value
+    plugin2: some-value
+    plugin3: some-value
+    plugin4: some-value
+  name: some-name
+  namespace: default
+  ownerReferences:
+  - apiVersion: apps/v1
+    blockOwnerDeletion: true
+    controller: true
+    kind: ReplicaSet
+    name: some-name
+    uid: 0a9d2b9e-779e-11e7-b422-42010a8001be
+  selfLink: /api/v1/namespaces/pah
+  uid: 23e8f548-a163-11e9-abe4-42010a80026b
+spec:
+  containers:
+  - args:
+    - one
+    - two
+    - three
+    - four
+    - five
+    - six
+    - seven
+    - eight
+    - nine
+    env:
+    - name: VAR_3
+      valueFrom:
+        secretKeyRef:
+          key: some-other-key
+          name: some-oher-name
+    - name: VAR_2
+      valueFrom:
+        secretKeyRef:
+          key: other-key
+          name: other-name
+    - name: VAR_1
+      valueFrom:
+        secretKeyRef:
+          key: some-key
+          name: some-name
+    image: some-image-name
+    imagePullPolicy: IfNotPresent
+    name: some-name
+    resources:
+      requests:
+        cpu: "0"
+    terminationMessagePath: /dev/termination-log
+    terminationMessagePolicy: File
+    volumeMounts:
+    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
+      name: default-token-hu5jz
+      readOnly: true
+  dnsPolicy: ClusterFirst
+  nodeName: node-name
+  priority: 0
+  restartPolicy: Always
+  schedulerName: default-scheduler
+  securityContext: {}
+  serviceAccount: default
+  serviceAccountName: default
+  terminationGracePeriodSeconds: 30
+  tolerations:
+  - effect: NoExecute
+    key: node.kubernetes.io/not-ready
+    operator: Exists
+    tolerationSeconds: 300
+  - effect: NoExecute
+    key: node.kubernetes.io/unreachable
+    operator: Exists
+    tolerationSeconds: 300
+  volumes:
+  - name: default-token-hu5jz
+    secret:
+      defaultMode: 420
+      secretName: default-token-hu5jz
+status:
+  conditions:
+  - lastProbeTime: null
+    lastTransitionTime: "2019-07-08T09:31:18Z"
+    status: "True"
+    type: Initialized
+  - lastProbeTime: null
+    lastTransitionTime: "2019-07-08T09:41:59Z"
+    status: "True"
+    type: Ready
+  - lastProbeTime: null
+    lastTransitionTime: null
+    status: "True"
+    type: ContainersReady
+  - lastProbeTime: null
+    lastTransitionTime: "2019-07-08T09:31:18Z"
+    status: "True"
+    type: PodScheduled
+  containerStatuses:
+  - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376
+    image: some-image-name
+    imageID: docker-pullable://some-image-id
+    lastState:
+      terminated:
+        containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565
+        exitCode: 255
+        finishedAt: "2019-07-08T09:39:09Z"
+        reason: Error
+        startedAt: "2019-07-08T09:38:54Z"
+    name: name
+    ready: true
+    restartCount: 6
+    state:
+      running:
+        startedAt: "2019-07-08T09:41:59Z"
+  hostIP: 10.0.0.1
+  phase: Running
+  podIP: 10.0.0.1
+  qosClass: BestEffort
+  startTime: "2019-07-08T09:31:18Z"
+`)
+
+func decodePod(podBytes []byte) v1.Pod {
+	pod := v1.Pod{}
+	err := yaml.Unmarshal(podBytes, &pod)
+	if err != nil {
+		panic(err)
+	}
+	return pod
+}
+
+func encodePod(pod v1.Pod) []byte {
+	podBytes, err := yaml.Marshal(pod)
+	if err != nil {
+		panic(err)
+	}
+	return podBytes
+}
+
+func BenchmarkNoServerSideApply(b *testing.B) {
+	defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, false)()
+
+	_, client, closeFn := setup(b)
+	defer closeFn()
+	flag.Lookup("v").Value.Set("0")
+
+	benchAll(b, client, decodePod(podBytes))
+}
+
+func getPodSizeWhenEnabled(b *testing.B, pod v1.Pod) int {
+	defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
+	_, client, closeFn := setup(b)
+	defer closeFn()
+	flag.Lookup("v").Value.Set("0")
+
+	pod.Name = "size-pod"
+	podB, err := client.CoreV1().RESTClient().Post().
+		Namespace("default").
+		Resource("pods").
+		SetHeader("Content-Type", "application/yaml").
+		SetHeader("Accept", "application/vnd.kubernetes.protobuf").
+		Body(encodePod(pod)).DoRaw()
+	if err != nil {
+		b.Fatalf("Failed to create object: %v", err)
+	}
+	return len(podB)
+}
+
+func BenchmarkNoServerSideApplyButSameSize(b *testing.B) {
+	pod := decodePod(podBytes)
+
+	ssaPodSize := getPodSizeWhenEnabled(b, pod)
+
+	defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, false)()
+	_, client, closeFn := setup(b)
+	defer closeFn()
+	flag.Lookup("v").Value.Set("0")
+
+	pod.Name = "size-pod"
+	noSSAPod, err := client.CoreV1().RESTClient().Post().
+		Namespace("default").
+		Resource("pods").
+		SetHeader("Content-Type", "application/yaml").
+		SetHeader("Accept", "application/vnd.kubernetes.protobuf").
+		Body(encodePod(pod)).DoRaw()
+	if err != nil {
+		b.Fatalf("Failed to create object: %v", err)
+	}
+
+	ssaDiff := ssaPodSize - len(noSSAPod)
+	fmt.Printf("Without SSA: %v bytes, With SSA: %v bytes, Difference: %v bytes\n", len(noSSAPod), ssaPodSize, ssaDiff)
+	annotations := pod.GetAnnotations()
+	builder := strings.Builder{}
+	for i := 0; i < ssaDiff; i++ {
+		builder.WriteByte('0')
+	}
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+	annotations["x-ssa-difference"] = builder.String()
+	pod.SetAnnotations(annotations)
+
+	benchAll(b, client, pod)
+}
+
+func BenchmarkServerSideApply(b *testing.B) {
+	defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
+
+	_, client, closeFn := setup(b)
+	defer closeFn()
+	flag.Lookup("v").Value.Set("0")
+
+	benchAll(b, client, decodePod(podBytes))
+}
+
+func benchAll(b *testing.B, client kubernetes.Interface, pod v1.Pod) {
+	// Create pod for repeated-updates
+	pod.Name = "repeated-pod"
+	_, err := client.CoreV1().RESTClient().Post().
+		Namespace("default").
+		Resource("pods").
+		SetHeader("Content-Type", "application/yaml").
+		Body(encodePod(pod)).Do().Get()
+	if err != nil {
+		b.Fatalf("Failed to create object: %v", err)
+	}
+
+	b.Run("List1", benchListPod(client, pod, 1))
+	b.Run("List20", benchListPod(client, pod, 20))
+	b.Run("List200", benchListPod(client, pod, 200))
+	b.Run("List2000", benchListPod(client, pod, 2000))
+
+	b.Run("RepeatedUpdates", benchRepeatedUpdate(client, "repeated-pod"))
+	b.Run("Post1", benchPostPod(client, pod, 1))
+	b.Run("Post10", benchPostPod(client, pod, 10))
+	b.Run("Post50", benchPostPod(client, pod, 50))
+}
+
+func benchPostPod(client kubernetes.Interface, pod v1.Pod, parallel int) func(*testing.B) {
+	return func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		for i := 0; i < b.N; i++ {
+			c := make(chan error)
+			for j := 0; j < parallel; j++ {
+				j := j
+				i := i
+				go func(pod v1.Pod) {
+					pod.Name = fmt.Sprintf("post%d-%d-%d-%d", parallel, b.N, j, i)
+					_, err := client.CoreV1().RESTClient().Post().
+						Namespace("default").
+						Resource("pods").
+						SetHeader("Content-Type", "application/yaml").
+						Body(encodePod(pod)).Do().Get()
+					c <- err
+				}(pod)
+			}
+			for j := 0; j < parallel; j++ {
+				err := <-c
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+			close(c)
+		}
+	}
+}
+
+func createNamespace(client kubernetes.Interface, name string) error {
+	namespace := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
+	namespaceBytes, err := yaml.Marshal(namespace)
+	if err != nil {
+		return fmt.Errorf("Failed to marshal namespace: %v", err)
+	}
+	_, err = client.CoreV1().RESTClient().Post().
+		Resource("namespaces").
+		SetHeader("Content-Type", "application/yaml").
+		Body(namespaceBytes).Do().Get()
+	if err != nil {
+		return fmt.Errorf("Failed to create namespace: %v", err)
+	}
+	return nil
+}
+
+func benchListPod(client kubernetes.Interface, pod v1.Pod, num int) func(*testing.B) {
+	return func(b *testing.B) {
+		namespace := fmt.Sprintf("get-%d-%d", num, b.N)
+		if err := createNamespace(client, namespace); err != nil {
+			b.Fatal(err)
+		}
+		// Create pods
+		for i := 0; i < num; i++ {
+			pod.Name = fmt.Sprintf("get-%d-%d", b.N, i)
+			pod.Namespace = namespace
+			_, err := client.CoreV1().RESTClient().Post().
+				Namespace(namespace).
+				Resource("pods").
+				SetHeader("Content-Type", "application/yaml").
+				Body(encodePod(pod)).Do().Get()
+			if err != nil {
+				b.Fatalf("Failed to create object: %v", err)
+			}
+		}
+
+		b.ResetTimer()
+		b.ReportAllocs()
+		for i := 0; i < b.N; i++ {
+			_, err := client.CoreV1().RESTClient().Get().
+				Namespace(namespace).
+				Resource("pods").
+				SetHeader("Accept", "application/vnd.kubernetes.protobuf").
+				Do().Get()
+			if err != nil {
+				b.Fatalf("Failed to list objects: %v", err)
+			}
+		}
+	}
+}
+
+func benchRepeatedUpdate(client kubernetes.Interface, podName string) func(*testing.B) {
+	return func(b *testing.B) {
+		b.ResetTimer()
+		b.ReportAllocs()
+		for i := 0; i < b.N; i++ {
+			_, err := client.CoreV1().RESTClient().Patch(types.JSONPatchType).
+				Namespace("default").
+				Resource("pods").
+				Name(podName).
+				Body([]byte(fmt.Sprintf(`[{"op": "replace", "path": "/spec/containers/0/image", "value": "image%d"}]`, i))).Do().Get()
+			if err != nil {
+				b.Fatalf("Failed to patch object: %v", err)
+			}
+		}
+	}
+}