From 1a2e651a1ce41fbc3f8d93554c103e8fb7546e73 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Mon, 8 Jan 2018 14:21:24 +0100
Subject: [PATCH 01/53] Add gnufied as AWS approver.

---
 pkg/cloudprovider/providers/aws/OWNERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/cloudprovider/providers/aws/OWNERS b/pkg/cloudprovider/providers/aws/OWNERS
index 905c5f972a2..2a8dcb8c834 100644
--- a/pkg/cloudprovider/providers/aws/OWNERS
+++ b/pkg/cloudprovider/providers/aws/OWNERS
@@ -1,6 +1,7 @@
 approvers:
 - justinsb
 - zmerlynn
+- gnufied
 reviewers:
 - gnufied
 - jsafrane

From 64b40b5f485a8fdafaa80a794b8dae78ff0ed0bd Mon Sep 17 00:00:00 2001
From: Lei Xue
Date: Mon, 15 Jan 2018 15:53:32 +0800
Subject: [PATCH 02/53] fix typo in resource_allocation.go

---
 pkg/scheduler/algorithm/priorities/resource_allocation.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/scheduler/algorithm/priorities/resource_allocation.go b/pkg/scheduler/algorithm/priorities/resource_allocation.go
index c938cedfddb..a0dc86234d0 100644
--- a/pkg/scheduler/algorithm/priorities/resource_allocation.go
+++ b/pkg/scheduler/algorithm/priorities/resource_allocation.go
@@ -45,7 +45,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
 	if priorityMeta, ok := meta.(*priorityMetadata); ok {
 		requested = *priorityMeta.nonZeroRequest
 	} else {
-		// We couldn't parse metadatat - fallback to computing it.
+		// We couldn't parse metadata - fall back to computing it.
 		requested = *getNonZeroRequests(pod)
 	}

From 7a67246d4766e92e410401f82ca9d9bc0723d03d Mon Sep 17 00:00:00 2001
From: Ismo Puustinen
Date: Thu, 1 Feb 2018 17:59:57 +0200
Subject: [PATCH 03/53] build: fix a logic error in shell script.

The right place to assign the "docker inspect" return value is outside
the subshell. The last return value was coming from something other than
the expected command.

---
 build/common.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/build/common.sh b/build/common.sh
index 6e4d01e6954..c2066dccbae 100755
--- a/build/common.sh
+++ b/build/common.sh
@@ -501,9 +501,11 @@ function kube::build::ensure_data_container() {
   # If the data container exists AND exited successfully, we can use it.
   # Otherwise nuke it and start over.
   local ret=0
-  local code=$(docker inspect \
+  local code=0
+
+  code=$(docker inspect \
     -f '{{.State.ExitCode}}' \
-    "${KUBE_DATA_CONTAINER_NAME}" 2>/dev/null || ret=$?)
+    "${KUBE_DATA_CONTAINER_NAME}" 2>/dev/null) || ret=$?
   if [[ "${ret}" == 0 && "${code}" != 0 ]]; then
     kube::build::destroy_container "${KUBE_DATA_CONTAINER_NAME}"
     ret=1

From 2ee1c80d0f71c9ad73dd2ecdb614f2f178a2bcd3 Mon Sep 17 00:00:00 2001
From: m1093782566
Date: Sun, 10 Dec 2017 15:09:50 +0800
Subject: [PATCH 04/53] abstract proxy servicePort and endpoints

---
 pkg/proxy/types.go | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/pkg/proxy/types.go b/pkg/proxy/types.go
index 578baff693d..613c570897d 100644
--- a/pkg/proxy/types.go
+++ b/pkg/proxy/types.go
@@ -20,11 +20,12 @@ import (
 	"fmt"

 	"k8s.io/apimachinery/pkg/types"
+	api "k8s.io/kubernetes/pkg/apis/core"
 )

 // ProxyProvider is the interface provided by proxier implementations.
 type ProxyProvider interface {
-	// Sync immediately synchronizes the ProxyProvider's current state to iptables.
+	// Sync immediately synchronizes the ProxyProvider's current state to proxy rules.
 	Sync()
 	// SyncLoop runs periodic work.
 	// This is expected to run as a goroutine or as the main loop of the app.
@@ -33,7 +34,7 @@ type ProxyProvider interface {
 }

 // ServicePortName carries a namespace + name + portname. This is the unique
-// identfier for a load-balanced service.
+// identifier for a load-balanced service.
 type ServicePortName struct {
 	types.NamespacedName
 	Port string
 }

@@ -42,3 +43,34 @@ type ServicePortName struct {
 func (spn ServicePortName) String() string {
 	return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
 }
+
+// ServicePort is an interface which abstracts information about a service.
+type ServicePort interface {
+	// String returns the service string. An example format can be: `IP:Port/Protocol`.
+	String() string
+	// ClusterIP returns the service cluster IP.
+	ClusterIP() string
+	// Protocol returns the service protocol.
+	Protocol() api.Protocol
+	// HealthCheckNodePort returns the service health check node port if present; 0 means not present.
+	HealthCheckNodePort() int
+}
+
+// Endpoint is an interface which abstracts information about an endpoint.
+type Endpoint interface {
+	// String returns the endpoint string. An example format can be: `IP:Port`.
+	// We take the returned value as ServiceEndpoint.Endpoint.
+	String() string
+	// IsLocal returns true if the endpoint is running on the same host as kube-proxy, otherwise returns false.
+	IsLocal() bool
+	// IP returns the IP part of the endpoint.
+	IP() string
+	// Equal checks if two endpoints are equal.
+	Equal(Endpoint) bool
+}
+
+// ServiceEndpoint is used to identify a service and one of its endpoints.
+type ServiceEndpoint struct {
+	Endpoint        string
+	ServicePortName ServicePortName
+}

From ca29a37f22d5c5f4a480f4a7cb2aa1056f2bca2c Mon Sep 17 00:00:00 2001
From: Maciej Szulik
Date: Wed, 7 Feb 2018 20:23:25 +0100
Subject: [PATCH 05/53] Create short name for cronjob

---
 pkg/registry/batch/cronjob/storage/storage.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pkg/registry/batch/cronjob/storage/storage.go b/pkg/registry/batch/cronjob/storage/storage.go
index d8ae47f3890..7826ea5a1e7 100644
--- a/pkg/registry/batch/cronjob/storage/storage.go
+++ b/pkg/registry/batch/cronjob/storage/storage.go
@@ -60,12 +60,18 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) {
 }

 var _ rest.CategoriesProvider = &REST{}
+var _ rest.ShortNamesProvider = &REST{}

 // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of.
 func (r *REST) Categories() []string {
 	return []string{"all"}
 }

+// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource.
+func (r *REST) ShortNames() []string {
+	return []string{"cj"}
+}
+
 // StatusREST implements the REST endpoint for changing the status of a resourcequota.
type StatusREST struct {
	store *genericregistry.Store

From 6e83d88be906c174ab3860eec70f2a4aec0ecb48 Mon Sep 17 00:00:00 2001
From: hzxuzhonghu
Date: Wed, 8 Nov 2017 16:03:26 +0800
Subject: [PATCH 06/53] audit support wildcard matching subresources

---
 .../k8s.io/apiserver/pkg/apis/audit/types.go  | 17 +++++++++++----
 .../pkg/apis/audit/v1alpha1/types.go          | 17 +++++++++++----
 .../apiserver/pkg/apis/audit/v1beta1/types.go | 17 +++++++++++----
 .../apiserver/pkg/audit/policy/checker.go     | 21 +++++++++++++------
 4 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go
index 2b318d5754d..78463b72196 100644
--- a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go
+++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go
@@ -226,10 +226,19 @@ type GroupResources struct {
 	// The empty string represents the core API group.
 	// +optional
 	Group string
-	// Resources is a list of resources within the API group. Subresources are
-	// matched using a "/" to indicate the subresource. For example, "pods/log"
-	// would match request to the log subresource of pods. The top level resource
-	// does not match subresources, "pods" doesn't match "pods/log".
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// 'pods' matches pods.
+	// 'pods/log' matches the log subresource of pods.
+	// '*' matches all resources and their subresources.
+	// 'pods/*' matches all subresources of pods.
+	// '*/scale' matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
 	// +optional
 	Resources []string
 	// ResourceNames is a list of resource instance names that the policy matches.

diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go
index 21f10c78d48..9254402f5f5 100644
--- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go
+++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go
@@ -233,10 +233,19 @@ type GroupResources struct {
 	// The empty string represents the core API group.
 	// +optional
 	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
-	// Resources is a list of resources within the API group. Subresources are
-	// matched using a "/" to indicate the subresource. For example, "pods/logs"
-	// would match request to the logs subresource of pods. The top level resource
-	// does not match subresources, "pods" doesn't match "pods/logs".
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// 'pods' matches pods.
+	// 'pods/log' matches the log subresource of pods.
+	// '*' matches all resources and their subresources.
+	// 'pods/*' matches all subresources of pods.
+	// '*/scale' matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
 	// +optional
 	Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
 	// ResourceNames is a list of resource instance names that the policy matches.
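For reference, the wildcard semantics documented in these type comments can be exercised in isolation. Below is an illustrative, self-contained Go sketch of the documented matching rules; the matches helper is hypothetical and not part of this patch — the authoritative logic is the ruleMatchesResource change in pkg/audit/policy/checker.go further down.

	package main

	import (
		"fmt"
		"strings"
	)

	// matches reports whether one policy Resources entry matches a request,
	// following the semantics documented above ('pods', 'pods/log', '*',
	// 'pods/*', '*/scale').
	func matches(policyResource, resource, subresource string) bool {
		combined := resource
		if subresource != "" {
			combined = resource + "/" + subresource
		}
		// Exact match, or the bare '*' wildcard which matches everything.
		if policyResource == "*" || policyResource == combined {
			return true
		}
		// "*/subresource" form: any resource, fixed subresource.
		if subresource != "" && strings.HasPrefix(policyResource, "*/") {
			return subresource == policyResource[2:]
		}
		// "resource/*" form: fixed resource, any subresource.
		if subresource != "" && strings.HasSuffix(policyResource, "/*") {
			return resource == policyResource[:len(policyResource)-2]
		}
		return false
	}

	func main() {
		fmt.Println(matches("pods", "pods", ""))                // true
		fmt.Println(matches("pods", "pods", "log"))             // false: a top-level resource does not match subresources
		fmt.Println(matches("pods/*", "pods", "log"))           // true
		fmt.Println(matches("*/scale", "deployments", "scale")) // true
	}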
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go
index 259599c050e..31e26db687a 100644
--- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go
+++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go
@@ -229,10 +229,19 @@ type GroupResources struct {
 	// The empty string represents the core API group.
 	// +optional
 	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
-	// Resources is a list of resources within the API group. Subresources are
-	// matched using a "/" to indicate the subresource. For example, "pods/log"
-	// would match request to the log subresource of pods. The top level resource
-	// does not match subresources, "pods" doesn't match "pods/log".
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// 'pods' matches pods.
+	// 'pods/log' matches the log subresource of pods.
+	// '*' matches all resources and their subresources.
+	// 'pods/*' matches all subresources of pods.
+	// '*/scale' matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
 	// +optional
 	Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
 	// ResourceNames is a list of resource instance names that the policy matches.

diff --git a/staging/src/k8s.io/apiserver/pkg/audit/policy/checker.go b/staging/src/k8s.io/apiserver/pkg/audit/policy/checker.go
index 3259013ad72..f3c17529966 100644
--- a/staging/src/k8s.io/apiserver/pkg/audit/policy/checker.go
+++ b/staging/src/k8s.io/apiserver/pkg/audit/policy/checker.go
@@ -160,11 +160,11 @@ func ruleMatchesResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool
 	apiGroup := attrs.GetAPIGroup()
 	resource := attrs.GetResource()
+	subresource := attrs.GetSubresource()
+	combinedResource := resource
 	// If subresource, the resource in the policy must match "(resource)/(subresource)"
-	//
-	// TODO: consider adding options like "pods/*" to match all subresources.
- if sr := attrs.GetSubresource(); sr != "" { - resource = resource + "/" + sr + if subresource != "" { + combinedResource = resource + "/" + subresource } name := attrs.GetName() @@ -175,8 +175,17 @@ func ruleMatchesResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool return true } for _, res := range gr.Resources { - if res == resource { - if len(gr.ResourceNames) == 0 || hasString(gr.ResourceNames, name) { + if len(gr.ResourceNames) == 0 || hasString(gr.ResourceNames, name) { + // match "*" + if res == combinedResource || res == "*" { + return true + } + // match "*/subresource" + if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") { + return true + } + // match "resource/*" + if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") { return true } } From cc135e985ccde88ac662b33ef81dd71de3ad0520 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 8 Nov 2017 16:20:16 +0800 Subject: [PATCH 07/53] add test case --- .../apiserver/pkg/audit/policy/checker_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/audit/policy/checker_test.go b/staging/src/k8s.io/apiserver/pkg/audit/policy/checker_test.go index 0f323436a92..dff7c110aeb 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/policy/checker_test.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/policy/checker_test.go @@ -105,6 +105,21 @@ var ( Verbs: []string{"get"}, Resources: []audit.GroupResources{{Resources: []string{"pods/log"}}}, }, + "getPodWildcardMatching": { + Level: audit.LevelRequest, + Verbs: []string{"get"}, + Resources: []audit.GroupResources{{Resources: []string{"*"}}}, + }, + "getPodResourceWildcardMatching": { + Level: audit.LevelRequest, + Verbs: []string{"get"}, + Resources: []audit.GroupResources{{Resources: []string{"*/log"}}}, + }, + "getPodSubResourceWildcardMatching": { + Level: audit.LevelRequest, + Verbs: []string{"get"}, + Resources: []audit.GroupResources{{Resources: []string{"pods/*"}}}, + }, "getClusterRoles": { Level: audit.LevelRequestResponse, Verbs: []string{"get"}, @@ -208,6 +223,9 @@ func testAuditLevel(t *testing.T, stages []audit.Stage) { test(t, "nonResource", audit.LevelNone, stages, stages, "getPodLogs", "getPods") test(t, "subresource", audit.LevelRequest, stages, stages, "getPodLogs", "getPods") + test(t, "subresource", audit.LevelRequest, stages, stages, "getPodWildcardMatching") + test(t, "subresource", audit.LevelRequest, stages, stages, "getPodResourceWildcardMatching") + test(t, "subresource", audit.LevelRequest, stages, stages, "getPodSubResourceWildcardMatching") } From 08c024f3670288648751b9444c7db6a63fb0cd04 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 8 Nov 2017 17:31:19 +0800 Subject: [PATCH 08/53] run hack/update-all.sh --- .../pkg/apis/audit/v1alpha1/generated.proto | 17 +++++++++++++---- .../pkg/apis/audit/v1beta1/generated.proto | 17 +++++++++++++---- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 926eb65edc9..0129b50e83a 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -114,10 +114,19 @@ message GroupResources { // +optional optional string group = 1; - // Resources is a list of resources within the API group. 
Subresources are
-  // matched using a "/" to indicate the subresource. For example, "pods/logs"
-  // would match request to the logs subresource of pods. The top level resource
-  // does not match subresources, "pods" doesn't match "pods/logs".
+  // Resources is a list of resources this rule applies to.
+  //
+  // For example:
+  // 'pods' matches pods.
+  // 'pods/log' matches the log subresource of pods.
+  // '*' matches all resources and their subresources.
+  // 'pods/*' matches all subresources of pods.
+  // '*/scale' matches all scale subresources.
+  //
+  // If a wildcard is present, the validation rule will ensure resources do not
+  // overlap with each other.
+  //
+  // An empty list implies all resources and subresources in this API group apply.
   // +optional
   repeated string resources = 2;

diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto
index bbac7f2b708..362bbbb9cbe 100644
--- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto
+++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto
@@ -118,10 +118,19 @@ message GroupResources {
   // +optional
   optional string group = 1;

-  // Resources is a list of resources within the API group. Subresources are
-  // matched using a "/" to indicate the subresource. For example, "pods/log"
-  // would match request to the log subresource of pods. The top level resource
-  // does not match subresources, "pods" doesn't match "pods/log".
+  // Resources is a list of resources this rule applies to.
+  //
+  // For example:
+  // 'pods' matches pods.
+  // 'pods/log' matches the log subresource of pods.
+  // '*' matches all resources and their subresources.
+  // 'pods/*' matches all subresources of pods.
+  // '*/scale' matches all scale subresources.
+  //
+  // If a wildcard is present, the validation rule will ensure resources do not
+  // overlap with each other.
+  //
+  // An empty list implies all resources and subresources in this API group apply.
// +optional repeated string resources = 2; From 95b2b94b1bbcb9191ab8de42abb0f920551d6be2 Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Fri, 2 Feb 2018 11:26:41 +0800 Subject: [PATCH 09/53] =?UTF-8?q?Change=20critical=20pods=E2=80=99=20templ?= =?UTF-8?q?ate=20to=20use=20priority?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../addons/calico-policy-controller/calico-node-daemonset.yaml | 1 + .../calico-node-vertical-autoscaler-deployment.yaml | 1 + cluster/addons/calico-policy-controller/typha-deployment.yaml | 1 + .../typha-horizontal-autoscaler-deployment.yaml | 1 + .../typha-vertical-autoscaler-deployment.yaml | 1 + .../addons/cluster-monitoring/google/heapster-controller.yaml | 1 + .../googleinfluxdb/heapster-controller-combined.yaml | 1 + .../addons/cluster-monitoring/influxdb/heapster-controller.yaml | 1 + .../cluster-monitoring/influxdb/influxdb-grafana-controller.yaml | 1 + .../cluster-monitoring/stackdriver/heapster-controller.yaml | 1 + .../cluster-monitoring/standalone/heapster-controller.yaml | 1 + cluster/addons/dashboard/dashboard-controller.yaml | 1 + .../dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml | 1 + cluster/addons/dns/kube-dns.yaml.base | 1 + cluster/addons/dns/kube-dns.yaml.in | 1 + cluster/addons/dns/kube-dns.yaml.sed | 1 + .../addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml | 1 + cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml | 1 + cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 1 + cluster/addons/ip-masq-agent/ip-masq-agent.yaml | 1 + cluster/addons/metadata-proxy/gce/metadata-proxy.yaml | 1 + cluster/addons/metrics-server/metrics-server-deployment.yaml | 1 + 22 files changed, 22 insertions(+) diff --git a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml index 39abe2a9bf5..c6394ade423 100644 --- a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml @@ -20,6 +20,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical nodeSelector: projectcalico.org/ds-ready: "true" hostNetwork: true diff --git a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml index c66c3e07200..300a094f3b3 100644 --- a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml @@ -16,6 +16,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 name: autoscaler diff --git a/cluster/addons/calico-policy-controller/typha-deployment.yaml b/cluster/addons/calico-policy-controller/typha-deployment.yaml index 01b32c9ca16..c69da082f69 100644 --- a/cluster/addons/calico-policy-controller/typha-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-deployment.yaml @@ -16,6 +16,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical tolerations: - key: CriticalAddonsOnly operator: Exists diff --git a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml 
b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml index 4f493b8bcf3..5ba6fbb57a9 100644 --- a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml @@ -16,6 +16,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2 name: autoscaler diff --git a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml index c59be9af62a..7f0fbe99b5d 100644 --- a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml @@ -16,6 +16,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 name: autoscaler diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 429be0da773..0ce9438be76 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -57,6 +57,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 90e3ca32302..44f8fc49591 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -57,6 +57,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/heapster-amd64:v1.5.0 diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 46a8e36813e..43e3b0e0597 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -57,6 +57,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index d562c748471..1bd2a53e6b3 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -22,6 +22,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml 
b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index c035d51a7a0..43bb182b51d 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -44,6 +44,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index ff994966d5f..affa89ed1d2 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -44,6 +44,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index 0c7ca6b5d80..6c3b51036db 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -27,6 +27,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - name: kubernetes-dashboard image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0 diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index cd89c6cd3fd..669d7e752bb 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -78,6 +78,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical containers: - name: autoscaler image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2 diff --git a/cluster/addons/dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns.yaml.base index edf77c3569c..c81b8304511 100644 --- a/cluster/addons/dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns.yaml.base @@ -84,6 +84,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical tolerations: - key: "CriticalAddonsOnly" operator: "Exists" diff --git a/cluster/addons/dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns.yaml.in index ea20f6d0ffd..e94b5c07aca 100644 --- a/cluster/addons/dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns.yaml.in @@ -84,6 +84,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical tolerations: - key: "CriticalAddonsOnly" operator: "Exists" diff --git a/cluster/addons/dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns.yaml.sed index af5772fc341..5512c8fa379 100644 --- a/cluster/addons/dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns.yaml.sed @@ -84,6 +84,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical tolerations: - key: "CriticalAddonsOnly" operator: "Exists" diff --git a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml index 
e6ca6f9e2c3..f61d0277ac6 100644 --- a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml +++ b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml @@ -18,6 +18,7 @@ metadata: labels: k8s-app: etcd-empty-dir-cleanup spec: + priorityClassName: system-node-critical serviceAccountName: etcd-empty-dir-cleanup hostNetwork: true dnsPolicy: Default diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index f390b3890a8..a9ddc51cb64 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -72,6 +72,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical serviceAccountName: fluentd-es containers: - name: fluentd-es diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index 130e84aaccd..f83141032bd 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -23,6 +23,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical serviceAccountName: fluentd-gcp dnsPolicy: Default containers: diff --git a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml index f6bb21c01b9..7f8f05c75cb 100644 --- a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml +++ b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml @@ -24,6 +24,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical serviceAccountName: ip-masq-agent hostNetwork: true containers: diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index e537a7ff167..3e25a43a8a7 100644 --- a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -33,6 +33,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical serviceAccountName: metadata-proxy hostNetwork: true dnsPolicy: Default diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 1ed7dbc7285..ad24688aa2b 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -44,6 +44,7 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-cluster-critical serviceAccountName: metrics-server containers: - name: metrics-server From f2875274423dac61293069f79eddf1c397e7376a Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 29 Nov 2017 23:12:19 +0800 Subject: [PATCH 10/53] admission registration use shared informer instead of poll --- .../configuration/mutating_webhook_manager.go | 100 ++++++++------- .../mutating_webhook_manager_test.go | 118 ++++++++++++++++-- .../validating_webhook_manager.go | 90 +++++++------ .../validating_webhook_manager_test.go | 118 ++++++++++++++++-- .../plugin/webhook/mutating/admission.go | 9 +- .../plugin/webhook/validating/admission.go | 11 +- 6 files changed, 316 insertions(+), 130 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go 
index bf4d0eabf98..fbde83b7d82 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -18,84 +18,82 @@ package configuration import ( "fmt" - "reflect" "sort" - - "github.com/golang/glog" + "sync/atomic" "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + admissionregistrationinformers "k8s.io/client-go/informers/admissionregistration/v1beta1" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + "k8s.io/client-go/tools/cache" ) -type MutatingWebhookConfigurationLister interface { - List(opts metav1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) -} - // MutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. type MutatingWebhookConfigurationManager struct { - *poller + ready int32 + configuration *atomic.Value + hasSynced func() bool + lister admissionregistrationlisters.MutatingWebhookConfigurationLister } -func NewMutatingWebhookConfigurationManager(c MutatingWebhookConfigurationLister) *MutatingWebhookConfigurationManager { - getFn := func() (runtime.Object, error) { - list, err := c.List(metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) || errors.IsForbidden(err) { - glog.V(5).Infof("MutatingWebhookConfiguration are disabled due to an error: %v", err) - return nil, ErrDisabled - } - return nil, err - } - return mergeMutatingWebhookConfigurations(list), nil +func NewMutatingWebhookConfigurationManager(informer admissionregistrationinformers.MutatingWebhookConfigurationInformer) *MutatingWebhookConfigurationManager { + manager := &MutatingWebhookConfigurationManager{ + ready: 0, + configuration: &atomic.Value{}, + hasSynced: informer.Informer().HasSynced, + lister: informer.Lister(), } - return &MutatingWebhookConfigurationManager{ - newPoller(getFn), - } + // Start with an empty list + manager.configuration.Store(&v1beta1.MutatingWebhookConfiguration{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager } // Webhooks returns the merged MutatingWebhookConfiguration. 
-func (im *MutatingWebhookConfigurationManager) Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) { - configuration, err := im.poller.configuration() +func (m *MutatingWebhookConfigurationManager) Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) { + if atomic.LoadInt32(&m.ready) == 0 { + if !m.hasSynced() { + // Return an error until we've synced + return nil, fmt.Errorf("mutating webhook configuration is not ready") + } + // Remember we're ready + atomic.StoreInt32(&m.ready, 1) + } + return m.configuration.Load().(*v1beta1.MutatingWebhookConfiguration), nil +} + +func (m *MutatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := m.lister.List(labels.Everything()) if err != nil { - return nil, err + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return } - mutatingWebhookConfiguration, ok := configuration.(*v1beta1.MutatingWebhookConfiguration) - if !ok { - return nil, fmt.Errorf("expected type %v, got type %v", reflect.TypeOf(mutatingWebhookConfiguration), reflect.TypeOf(configuration)) - } - return mutatingWebhookConfiguration, nil + m.configuration.Store(mergeMutatingWebhookConfigurations(configurations)) } -func (im *MutatingWebhookConfigurationManager) Run(stopCh <-chan struct{}) { - im.poller.Run(stopCh) -} - -func mergeMutatingWebhookConfigurations( - list *v1beta1.MutatingWebhookConfigurationList, -) *v1beta1.MutatingWebhookConfiguration { - configurations := append([]v1beta1.MutatingWebhookConfiguration{}, list.Items...) +func mergeMutatingWebhookConfigurations(configurations []*v1beta1.MutatingWebhookConfiguration) *v1beta1.MutatingWebhookConfiguration { var ret v1beta1.MutatingWebhookConfiguration // The internal order of webhooks for each configuration is provided by the user // but configurations themselves can be in any order. As we are going to run these // webhooks in serial, they are sorted here to have a deterministic order. - sort.Sort(byName(configurations)) + sort.SliceStable(configurations, MutatingWebhookConfigurationSorter(configurations).ByName) for _, c := range configurations { ret.Webhooks = append(ret.Webhooks, c.Webhooks...) } return &ret } -// byName sorts MutatingWebhookConfiguration by name. These objects are all in -// cluster namespace (aka no namespace) thus they all have unique names. -type byName []v1beta1.MutatingWebhookConfiguration +type MutatingWebhookConfigurationSorter []*v1beta1.MutatingWebhookConfiguration -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - return x[i].ObjectMeta.Name < x[j].ObjectMeta.Name +func (a MutatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go index 97333880b09..e6b50aa2320 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go @@ -17,24 +17,118 @@ limitations under the License. 
package configuration import ( + "fmt" + "reflect" "testing" + "time" "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/labels" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + "k8s.io/client-go/tools/cache" ) -type disabledMutatingWebhookConfigLister struct{} - -func (l *disabledMutatingWebhookConfigLister) List(options metav1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) { - return nil, errors.NewNotFound(schema.GroupResource{Group: "admissionregistration", Resource: "MutatingWebhookConfigurations"}, "") +type fakeMutatingWebhookConfigSharedInformer struct { + informer *fakeMutatingWebhookConfigInformer + lister *fakeMutatingWebhookConfigLister } -func TestMutatingWebhookConfigDisabled(t *testing.T) { - manager := NewMutatingWebhookConfigurationManager(&disabledMutatingWebhookConfigLister{}) - manager.sync() - _, err := manager.Webhooks() - if err.Error() != ErrDisabled.Error() { - t.Errorf("expected %v, got %v", ErrDisabled, err) + +func (f *fakeMutatingWebhookConfigSharedInformer) Informer() cache.SharedIndexInformer { + return f.informer +} +func (f *fakeMutatingWebhookConfigSharedInformer) Lister() admissionregistrationlisters.MutatingWebhookConfigurationLister { + return f.lister +} + +type fakeMutatingWebhookConfigInformer struct { + eventHandler cache.ResourceEventHandler + hasSynced bool +} + +func (f *fakeMutatingWebhookConfigInformer) AddEventHandler(handler cache.ResourceEventHandler) { + fmt.Println("added handler") + f.eventHandler = handler +} +func (f *fakeMutatingWebhookConfigInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) GetStore() cache.Store { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) GetController() cache.Controller { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) Run(stopCh <-chan struct{}) { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) HasSynced() bool { + return f.hasSynced +} +func (f *fakeMutatingWebhookConfigInformer) LastSyncResourceVersion() string { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) AddIndexers(indexers cache.Indexers) error { + panic("unsupported") +} +func (f *fakeMutatingWebhookConfigInformer) GetIndexer() cache.Indexer { panic("unsupported") } + +type fakeMutatingWebhookConfigLister struct { + list []*v1beta1.MutatingWebhookConfiguration + err error +} + +func (f *fakeMutatingWebhookConfigLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { + return f.list, f.err +} + +func (f *fakeMutatingWebhookConfigLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { + panic("unsupported") +} + +func TestGetMutatingWebhookConfig(t *testing.T) { + informer := &fakeMutatingWebhookConfigSharedInformer{ + informer: &fakeMutatingWebhookConfigInformer{}, + lister: &fakeMutatingWebhookConfigLister{}, + } + + // unsynced, error retrieving list + informer.informer.hasSynced = false + informer.lister.list = nil + informer.lister.err = fmt.Errorf("mutating webhook configuration is not ready") + manager := NewMutatingWebhookConfigurationManager(informer) + if _, err := manager.Webhooks(); err == nil { + t.Errorf("expected err, but got none") + } + + // list found, 
still unsynced + informer.informer.hasSynced = false + informer.lister.list = []*v1beta1.MutatingWebhookConfiguration{} + informer.lister.err = nil + if _, err := manager.Webhooks(); err == nil { + t.Errorf("expected err, but got none") + } + + // items populated, still unsynced + webhookContainer := &v1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{Name: "webhook1"}, + Webhooks: []v1beta1.Webhook{{Name: "webhook1.1"}}, + } + informer.informer.hasSynced = false + informer.lister.list = []*v1beta1.MutatingWebhookConfiguration{webhookContainer.DeepCopy()} + informer.lister.err = nil + informer.informer.eventHandler.OnAdd(webhookContainer.DeepCopy()) + if _, err := manager.Webhooks(); err == nil { + t.Errorf("expected err, but got none") + } + + // sync completed + informer.informer.hasSynced = true + hooks, err := manager.Webhooks() + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if !reflect.DeepEqual(hooks.Webhooks, webhookContainer.Webhooks) { + t.Errorf("Expected\n%#v\ngot\n%#v", webhookContainer.Webhooks, hooks.Webhooks) } } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go index 8f9fd34daae..f93068b8037 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -18,67 +18,81 @@ package configuration import ( "fmt" - "reflect" - - "github.com/golang/glog" + "sort" + "sync/atomic" "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + admissionregistrationinformers "k8s.io/client-go/informers/admissionregistration/v1beta1" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + "k8s.io/client-go/tools/cache" ) -type ValidatingWebhookConfigurationLister interface { - List(opts metav1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) -} - // ValidatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. 
type ValidatingWebhookConfigurationManager struct { - *poller + ready int32 + configuration *atomic.Value + hasSynced func() bool + lister admissionregistrationlisters.ValidatingWebhookConfigurationLister } -func NewValidatingWebhookConfigurationManager(c ValidatingWebhookConfigurationLister) *ValidatingWebhookConfigurationManager { - getFn := func() (runtime.Object, error) { - list, err := c.List(metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) || errors.IsForbidden(err) { - glog.V(5).Infof("ValidatingWebhookConfiguration are disabled due to an error: %v", err) - return nil, ErrDisabled - } - return nil, err - } - return mergeValidatingWebhookConfigurations(list), nil +func NewValidatingWebhookConfigurationManager(informer admissionregistrationinformers.ValidatingWebhookConfigurationInformer) *ValidatingWebhookConfigurationManager { + manager := &ValidatingWebhookConfigurationManager{ + ready: 0, + configuration: &atomic.Value{}, + hasSynced: informer.Informer().HasSynced, + lister: informer.Lister(), } - return &ValidatingWebhookConfigurationManager{ - newPoller(getFn), - } + // Start with an empty list + manager.configuration.Store(&v1beta1.ValidatingWebhookConfiguration{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager } // Webhooks returns the merged ValidatingWebhookConfiguration. -func (im *ValidatingWebhookConfigurationManager) Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) { - configuration, err := im.poller.configuration() - if err != nil { - return nil, err +func (v *ValidatingWebhookConfigurationManager) Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) { + if atomic.LoadInt32(&v.ready) == 0 { + if !v.hasSynced() { + // Return an error until we've synced + return nil, fmt.Errorf("validating webhook configuration is not ready") + } + // Remember we're ready + atomic.StoreInt32(&v.ready, 1) } - validatingWebhookConfiguration, ok := configuration.(*v1beta1.ValidatingWebhookConfiguration) - if !ok { - return nil, fmt.Errorf("expected type %v, got type %v", reflect.TypeOf(validatingWebhookConfiguration), reflect.TypeOf(configuration)) - } - return validatingWebhookConfiguration, nil + return v.configuration.Load().(*v1beta1.ValidatingWebhookConfiguration), nil } -func (im *ValidatingWebhookConfigurationManager) Run(stopCh <-chan struct{}) { - im.poller.Run(stopCh) +func (v *ValidatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := v.lister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return + } + v.configuration.Store(mergeValidatingWebhookConfigurations(configurations)) } func mergeValidatingWebhookConfigurations( - list *v1beta1.ValidatingWebhookConfigurationList, + configurations []*v1beta1.ValidatingWebhookConfiguration, ) *v1beta1.ValidatingWebhookConfiguration { - configurations := list.Items + sort.SliceStable(configurations, ValidatingWebhookConfigurationSorter(configurations).ByName) var ret v1beta1.ValidatingWebhookConfiguration for _, c := range configurations { ret.Webhooks = append(ret.Webhooks, c.Webhooks...) 
} return &ret } + +type ValidatingWebhookConfigurationSorter []*v1beta1.ValidatingWebhookConfiguration + +func (a ValidatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name +} diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go index 60ba5367325..929d7b2cfcf 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go @@ -17,24 +17,118 @@ limitations under the License. package configuration import ( + "fmt" + "reflect" "testing" + "time" "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/labels" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" + "k8s.io/client-go/tools/cache" ) -type disabledValidatingWebhookConfigLister struct{} - -func (l *disabledValidatingWebhookConfigLister) List(options metav1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) { - return nil, errors.NewNotFound(schema.GroupResource{Group: "admissionregistration", Resource: "ValidatingWebhookConfigurations"}, "") +type fakeValidatingWebhookConfigSharedInformer struct { + informer *fakeValidatingWebhookConfigInformer + lister *fakeValidatingWebhookConfigLister } -func TestWebhookConfigDisabled(t *testing.T) { - manager := NewValidatingWebhookConfigurationManager(&disabledValidatingWebhookConfigLister{}) - manager.sync() - _, err := manager.Webhooks() - if err.Error() != ErrDisabled.Error() { - t.Errorf("expected %v, got %v", ErrDisabled, err) + +func (f *fakeValidatingWebhookConfigSharedInformer) Informer() cache.SharedIndexInformer { + return f.informer +} +func (f *fakeValidatingWebhookConfigSharedInformer) Lister() admissionregistrationlisters.ValidatingWebhookConfigurationLister { + return f.lister +} + +type fakeValidatingWebhookConfigInformer struct { + eventHandler cache.ResourceEventHandler + hasSynced bool +} + +func (f *fakeValidatingWebhookConfigInformer) AddEventHandler(handler cache.ResourceEventHandler) { + fmt.Println("added handler") + f.eventHandler = handler +} +func (f *fakeValidatingWebhookConfigInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) GetStore() cache.Store { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) GetController() cache.Controller { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) Run(stopCh <-chan struct{}) { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) HasSynced() bool { + return f.hasSynced +} +func (f *fakeValidatingWebhookConfigInformer) LastSyncResourceVersion() string { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) AddIndexers(indexers cache.Indexers) error { + panic("unsupported") +} +func (f *fakeValidatingWebhookConfigInformer) GetIndexer() cache.Indexer { panic("unsupported") } + +type fakeValidatingWebhookConfigLister struct { + list []*v1beta1.ValidatingWebhookConfiguration + err error +} + +func (f *fakeValidatingWebhookConfigLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { 
+	return f.list, f.err
+}
+
+func (f *fakeValidatingWebhookConfigLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) {
+	panic("unsupported")
+}
+
+func TestGetValidatingWebhookConfig(t *testing.T) {
+	informer := &fakeValidatingWebhookConfigSharedInformer{
+		informer: &fakeValidatingWebhookConfigInformer{},
+		lister:   &fakeValidatingWebhookConfigLister{},
+	}
+
+	// unsynced, error retrieving list
+	informer.informer.hasSynced = false
+	informer.lister.list = nil
+	informer.lister.err = fmt.Errorf("validating webhook configuration is not ready")
+	manager := NewValidatingWebhookConfigurationManager(informer)
+	if _, err := manager.Webhooks(); err == nil {
+		t.Errorf("expected err, but got none")
+	}
+
+	// list found, still unsynced
+	informer.informer.hasSynced = false
+	informer.lister.list = []*v1beta1.ValidatingWebhookConfiguration{}
+	informer.lister.err = nil
+	if _, err := manager.Webhooks(); err == nil {
+		t.Errorf("expected err, but got none")
+	}
+
+	// items populated, still unsynced
+	webhookContainer := &v1beta1.ValidatingWebhookConfiguration{
+		ObjectMeta: metav1.ObjectMeta{Name: "webhook1"},
+		Webhooks:   []v1beta1.Webhook{{Name: "webhook1.1"}},
+	}
+	informer.informer.hasSynced = false
+	informer.lister.list = []*v1beta1.ValidatingWebhookConfiguration{webhookContainer.DeepCopy()}
+	informer.lister.err = nil
+	informer.informer.eventHandler.OnAdd(webhookContainer.DeepCopy())
+	if _, err := manager.Webhooks(); err == nil {
+		t.Errorf("expected err, but got none")
+	}
+
+	// sync completed
+	informer.informer.hasSynced = true
+	hooks, err := manager.Webhooks()
+	if err != nil {
+		t.Errorf("unexpected err: %v", err)
+	}
+	if !reflect.DeepEqual(hooks.Webhooks, webhookContainer.Webhooks) {
+		t.Errorf("Expected\n%#v\ngot\n%#v", webhookContainer.Webhooks, hooks.Webhooks)
+	}
+}

diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go
index 6d62a36f629..0f50edf6a3e 100644
--- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go
+++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go
@@ -35,7 +35,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/runtime/serializer/json"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/admission"
 	"k8s.io/apiserver/pkg/admission/configuration"
 	genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
@@ -69,7 +68,6 @@ func Register(plugins *admission.Plugins) {

 // WebhookSource can list dynamic webhook plugins.
 type WebhookSource interface {
-	Run(stopCh <-chan struct{})
 	Webhooks() (*v1beta1.MutatingWebhookConfiguration, error)
 }

@@ -146,7 +144,6 @@ func (a *MutatingWebhook) SetScheme(scheme *runtime.Scheme) {
 // WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it
 func (a *MutatingWebhook) SetExternalKubeClientSet(client clientset.Interface) {
 	a.namespaceMatcher.Client = client
-	a.hookSource = configuration.NewMutatingWebhookConfigurationManager(client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations())
 }

 // SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface.
@@ -154,6 +151,7 @@ func (a *MutatingWebhook) SetExternalKubeInformerFactory(f informers.SharedInfor namespaceInformer := f.Core().V1().Namespaces() a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() a.SetReadyFunc(namespaceInformer.Informer().HasSynced) + a.hookSource = configuration.NewMutatingWebhookConfigurationManager(f.Admissionregistration().V1beta1().MutatingWebhookConfigurations()) } // ValidateInitialization implements the InitializationValidator interface. @@ -176,16 +174,11 @@ func (a *MutatingWebhook) ValidateInitialization() error { if a.defaulter == nil { return fmt.Errorf("MutatingWebhook.defaulter is not properly setup") } - go a.hookSource.Run(wait.NeverStop) return nil } func (a *MutatingWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.MutatingWebhookConfiguration, error) { hookConfig, err := a.hookSource.Webhooks() - // if Webhook configuration is disabled, fail open - if err == configuration.ErrDisabled { - return &v1beta1.MutatingWebhookConfiguration{}, nil - } if err != nil { e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go index f68e46fa585..f14d65083de 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/configuration" genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" @@ -68,7 +67,6 @@ func Register(plugins *admission.Plugins) { // WebhookSource can list dynamic webhook plugins. type WebhookSource interface { - Run(stopCh <-chan struct{}) Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) } @@ -141,7 +139,6 @@ func (a *ValidatingAdmissionWebhook) SetScheme(scheme *runtime.Scheme) { // WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it func (a *ValidatingAdmissionWebhook) SetExternalKubeClientSet(client clientset.Interface) { a.namespaceMatcher.Client = client - a.hookSource = configuration.NewValidatingWebhookConfigurationManager(client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations()) } // SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. @@ -149,12 +146,13 @@ func (a *ValidatingAdmissionWebhook) SetExternalKubeInformerFactory(f informers. namespaceInformer := f.Core().V1().Namespaces() a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() a.SetReadyFunc(namespaceInformer.Informer().HasSynced) + a.hookSource = configuration.NewValidatingWebhookConfigurationManager(f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations()) } // ValidateInitialization implements the InitializationValidator interface. 
func (a *ValidatingAdmissionWebhook) ValidateInitialization() error { if a.hookSource == nil { - return fmt.Errorf("ValidatingAdmissionWebhook admission plugin requires a Kubernetes client to be provided") + return fmt.Errorf("ValidatingAdmissionWebhook admission plugin requires a Kubernetes informer to be provided") } if err := a.namespaceMatcher.Validate(); err != nil { return fmt.Errorf("ValidatingAdmissionWebhook.namespaceMatcher is not properly setup: %v", err) @@ -165,16 +163,11 @@ func (a *ValidatingAdmissionWebhook) ValidateInitialization() error { if err := a.convertor.Validate(); err != nil { return fmt.Errorf("ValidatingAdmissionWebhook.convertor is not properly setup: %v", err) } - go a.hookSource.Run(wait.NeverStop) return nil } func (a *ValidatingAdmissionWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.ValidatingWebhookConfiguration, error) { hookConfig, err := a.hookSource.Webhooks() - // if Webhook configuration is disabled, fail open - if err == configuration.ErrDisabled { - return &v1beta1.ValidatingWebhookConfiguration{}, nil - } if err != nil { e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) From ec3925978511cc6b844c5b479c9b30ae21a0136a Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 6 Dec 2017 11:06:04 +0800 Subject: [PATCH 11/53] add wait ready for mutating/validating webhook configuration --- .../configuration/mutating_webhook_manager.go | 16 +----- .../mutating_webhook_manager_test.go | 49 +++++++------------ .../validating_webhook_manager.go | 16 +----- .../validating_webhook_manager_test.go | 49 +++++++------------ .../admission/plugin/webhook/mutating/BUILD | 1 - .../plugin/webhook/mutating/admission.go | 33 +++++-------- .../plugin/webhook/mutating/admission_test.go | 6 +-- .../admission/plugin/webhook/validating/BUILD | 1 - .../plugin/webhook/validating/admission.go | 31 +++++------- .../webhook/validating/admission_test.go | 6 +-- 10 files changed, 72 insertions(+), 136 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go index fbde83b7d82..3c0990699a4 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -31,17 +31,13 @@ import ( // MutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. type MutatingWebhookConfigurationManager struct { - ready int32 configuration *atomic.Value - hasSynced func() bool lister admissionregistrationlisters.MutatingWebhookConfigurationLister } func NewMutatingWebhookConfigurationManager(informer admissionregistrationinformers.MutatingWebhookConfigurationInformer) *MutatingWebhookConfigurationManager { manager := &MutatingWebhookConfigurationManager{ - ready: 0, configuration: &atomic.Value{}, - hasSynced: informer.Informer().HasSynced, lister: informer.Lister(), } @@ -59,16 +55,8 @@ func NewMutatingWebhookConfigurationManager(informer admissionregistrationinform } // Webhooks returns the merged MutatingWebhookConfiguration. 
-func (m *MutatingWebhookConfigurationManager) Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) { - if atomic.LoadInt32(&m.ready) == 0 { - if !m.hasSynced() { - // Return an error until we've synced - return nil, fmt.Errorf("mutating webhook configuration is not ready") - } - // Remember we're ready - atomic.StoreInt32(&m.ready, 1) - } - return m.configuration.Load().(*v1beta1.MutatingWebhookConfiguration), nil +func (m *MutatingWebhookConfigurationManager) Webhooks() *v1beta1.MutatingWebhookConfiguration { + return m.configuration.Load().(*v1beta1.MutatingWebhookConfiguration) } func (m *MutatingWebhookConfigurationManager) updateConfiguration() { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go index e6b50aa2320..d6f4f1a4512 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager_test.go @@ -43,7 +43,6 @@ func (f *fakeMutatingWebhookConfigSharedInformer) Lister() admissionregistration type fakeMutatingWebhookConfigInformer struct { eventHandler cache.ResourceEventHandler - hasSynced bool } func (f *fakeMutatingWebhookConfigInformer) AddEventHandler(handler cache.ResourceEventHandler) { @@ -63,7 +62,7 @@ func (f *fakeMutatingWebhookConfigInformer) Run(stopCh <-chan struct{}) { panic("unsupported") } func (f *fakeMutatingWebhookConfigInformer) HasSynced() bool { - return f.hasSynced + panic("unsupported") } func (f *fakeMutatingWebhookConfigInformer) LastSyncResourceVersion() string { panic("unsupported") @@ -92,43 +91,33 @@ func TestGetMutatingWebhookConfig(t *testing.T) { lister: &fakeMutatingWebhookConfigLister{}, } - // unsynced, error retrieving list - informer.informer.hasSynced = false + // no configurations informer.lister.list = nil - informer.lister.err = fmt.Errorf("mutating webhook configuration is not ready") manager := NewMutatingWebhookConfigurationManager(informer) - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") + if configurations := manager.Webhooks(); len(configurations.Webhooks) != 0 { + t.Errorf("expected empty webhooks, but got %v", configurations.Webhooks) } - // list found, still unsynced - informer.informer.hasSynced = false - informer.lister.list = []*v1beta1.MutatingWebhookConfiguration{} - informer.lister.err = nil - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") - } - - // items populated, still unsynced - webhookContainer := &v1beta1.MutatingWebhookConfiguration{ + // list err + webhookConfiguration := &v1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "webhook1"}, Webhooks: []v1beta1.Webhook{{Name: "webhook1.1"}}, } - informer.informer.hasSynced = false - informer.lister.list = []*v1beta1.MutatingWebhookConfiguration{webhookContainer.DeepCopy()} - informer.lister.err = nil - informer.informer.eventHandler.OnAdd(webhookContainer.DeepCopy()) - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") + informer.lister.list = []*v1beta1.MutatingWebhookConfiguration{webhookConfiguration.DeepCopy()} + informer.lister.err = fmt.Errorf("mutating webhook configuration list error") + informer.informer.eventHandler.OnAdd(webhookConfiguration.DeepCopy()) + if configurations := manager.Webhooks(); len(configurations.Webhooks) != 0 { + t.Errorf("expected 
empty webhooks, but got %v", configurations.Webhooks) } - // sync completed - informer.informer.hasSynced = true - hooks, err := manager.Webhooks() - if err != nil { - t.Errorf("unexpected err: %v", err) + // configuration populated + informer.lister.err = nil + informer.informer.eventHandler.OnAdd(webhookConfiguration.DeepCopy()) + configurations := manager.Webhooks() + if len(configurations.Webhooks) == 0 { + t.Errorf("expected non empty webhooks") } - if !reflect.DeepEqual(hooks.Webhooks, webhookContainer.Webhooks) { - t.Errorf("Expected\n%#v\ngot\n%#v", webhookContainer.Webhooks, hooks.Webhooks) + if !reflect.DeepEqual(configurations.Webhooks, webhookConfiguration.Webhooks) { + t.Errorf("Expected\n%#v\ngot\n%#v", webhookConfiguration.Webhooks, configurations.Webhooks) } } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go index f93068b8037..33644f57fed 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -31,17 +31,13 @@ import ( // ValidatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. type ValidatingWebhookConfigurationManager struct { - ready int32 configuration *atomic.Value - hasSynced func() bool lister admissionregistrationlisters.ValidatingWebhookConfigurationLister } func NewValidatingWebhookConfigurationManager(informer admissionregistrationinformers.ValidatingWebhookConfigurationInformer) *ValidatingWebhookConfigurationManager { manager := &ValidatingWebhookConfigurationManager{ - ready: 0, configuration: &atomic.Value{}, - hasSynced: informer.Informer().HasSynced, lister: informer.Lister(), } @@ -59,16 +55,8 @@ func NewValidatingWebhookConfigurationManager(informer admissionregistrationinfo } // Webhooks returns the merged ValidatingWebhookConfiguration. 
-func (v *ValidatingWebhookConfigurationManager) Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) { - if atomic.LoadInt32(&v.ready) == 0 { - if !v.hasSynced() { - // Return an error until we've synced - return nil, fmt.Errorf("validating webhook configuration is not ready") - } - // Remember we're ready - atomic.StoreInt32(&v.ready, 1) - } - return v.configuration.Load().(*v1beta1.ValidatingWebhookConfiguration), nil +func (v *ValidatingWebhookConfigurationManager) Webhooks() *v1beta1.ValidatingWebhookConfiguration { + return v.configuration.Load().(*v1beta1.ValidatingWebhookConfiguration) } func (v *ValidatingWebhookConfigurationManager) updateConfiguration() { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go index 929d7b2cfcf..6505b2b9b4b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager_test.go @@ -43,7 +43,6 @@ func (f *fakeValidatingWebhookConfigSharedInformer) Lister() admissionregistrati type fakeValidatingWebhookConfigInformer struct { eventHandler cache.ResourceEventHandler - hasSynced bool } func (f *fakeValidatingWebhookConfigInformer) AddEventHandler(handler cache.ResourceEventHandler) { @@ -63,7 +62,7 @@ func (f *fakeValidatingWebhookConfigInformer) Run(stopCh <-chan struct{}) { panic("unsupported") } func (f *fakeValidatingWebhookConfigInformer) HasSynced() bool { - return f.hasSynced + panic("unsupported") } func (f *fakeValidatingWebhookConfigInformer) LastSyncResourceVersion() string { panic("unsupported") @@ -92,43 +91,33 @@ func TestGettValidatingWebhookConfig(t *testing.T) { lister: &fakeValidatingWebhookConfigLister{}, } - // unsynced, error retrieving list - informer.informer.hasSynced = false + // no configurations informer.lister.list = nil - informer.lister.err = fmt.Errorf("validating webhook configuration is not ready") manager := NewValidatingWebhookConfigurationManager(informer) - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") + if configurations := manager.Webhooks(); len(configurations.Webhooks) != 0 { + t.Errorf("expected empty webhooks, but got %v", configurations.Webhooks) } - // list found, still unsynced - informer.informer.hasSynced = false - informer.lister.list = []*v1beta1.ValidatingWebhookConfiguration{} - informer.lister.err = nil - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") - } - - // items populated, still unsynced - webhookContainer := &v1beta1.ValidatingWebhookConfiguration{ + // list error + webhookConfiguration := &v1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "webhook1"}, Webhooks: []v1beta1.Webhook{{Name: "webhook1.1"}}, } - informer.informer.hasSynced = false - informer.lister.list = []*v1beta1.ValidatingWebhookConfiguration{webhookContainer.DeepCopy()} - informer.lister.err = nil - informer.informer.eventHandler.OnAdd(webhookContainer.DeepCopy()) - if _, err := manager.Webhooks(); err == nil { - t.Errorf("expected err, but got none") + informer.lister.list = []*v1beta1.ValidatingWebhookConfiguration{webhookConfiguration.DeepCopy()} + informer.lister.err = fmt.Errorf("validating webhook configuration list error") + informer.informer.eventHandler.OnAdd(webhookConfiguration.DeepCopy()) + if configurations := manager.Webhooks(); 
len(configurations.Webhooks) != 0 { + t.Errorf("expected empty webhooks, but got %v", configurations.Webhooks) } - // sync completed - informer.informer.hasSynced = true - hooks, err := manager.Webhooks() - if err != nil { - t.Errorf("unexpected err: %v", err) + // configuration populated + informer.lister.err = nil + informer.informer.eventHandler.OnAdd(webhookConfiguration.DeepCopy()) + configurations := manager.Webhooks() + if len(configurations.Webhooks) == 0 { + t.Errorf("expected non empty webhooks") } - if !reflect.DeepEqual(hooks.Webhooks, webhookContainer.Webhooks) { - t.Errorf("Expected\n%#v\ngot\n%#v", webhookContainer.Webhooks, hooks.Webhooks) + if !reflect.DeepEqual(configurations.Webhooks, webhookConfiguration.Webhooks) { + t.Errorf("Expected\n%#v\ngot\n%#v", webhookConfiguration.Webhooks, configurations.Webhooks) } } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD index 0d46b5d7627..6ed322b2500 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -14,7 +14,6 @@ go_library( "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go index 0f50edf6a3e..73e213a6f10 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -30,7 +30,6 @@ import ( admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -68,7 +67,7 @@ func Register(plugins *admission.Plugins) { // WebhookSource can list dynamic webhook plugins. type WebhookSource interface { - Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) + Webhooks() *v1beta1.MutatingWebhookConfiguration } // NewMutatingWebhook returns a generic admission webhook plugin. 
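For context: the lister-backed manager returned by NewMutatingWebhookConfigurationManager keeps its atomic.Value current from informer events instead of a polling goroutine, which is why WebhookSource no longer carries Run(). A minimal sketch of that wiring, assuming the conventional client-go event-handler pattern (the actual registration sits outside the hunks shown here):

	informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		// Recompute the merged configuration on every add/update/delete.
		AddFunc:    func(_ interface{}) { manager.updateConfiguration() },
		UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() },
		DeleteFunc: func(_ interface{}) { manager.updateConfiguration() },
	})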
@@ -150,8 +149,11 @@ func (a *MutatingWebhook) SetExternalKubeClientSet(client clientset.Interface) { func (a *MutatingWebhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { namespaceInformer := f.Core().V1().Namespaces() a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() - a.SetReadyFunc(namespaceInformer.Informer().HasSynced) - a.hookSource = configuration.NewMutatingWebhookConfigurationManager(f.Admissionregistration().V1beta1().MutatingWebhookConfigurations()) + mutatingWebhookConfigurationsInformer := f.Admissionregistration().V1beta1().MutatingWebhookConfigurations() + a.hookSource = configuration.NewMutatingWebhookConfigurationManager(mutatingWebhookConfigurationsInformer) + a.SetReadyFunc(func() bool { + return namespaceInformer.Informer().HasSynced() && mutatingWebhookConfigurationsInformer.Informer().HasSynced() + }) } // ValidateInitialization implements the InitializationValidator interface. @@ -177,27 +179,18 @@ func (a *MutatingWebhook) ValidateInitialization() error { return nil } -func (a *MutatingWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.MutatingWebhookConfiguration, error) { - hookConfig, err := a.hookSource.Webhooks() - if err != nil { - e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) - e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) - e.ErrStatus.Reason = "LoadingConfiguration" - e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{ - Type: "MutatingWebhookConfigurationFailure", - Message: "An error has occurred while refreshing the MutatingWebhook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.", - }) - return nil, e - } - return hookConfig, nil +func (a *MutatingWebhook) loadConfiguration(attr admission.Attributes) *v1beta1.MutatingWebhookConfiguration { + hookConfig := a.hookSource.Webhooks() + return hookConfig } // Admit makes an admission decision based on the request attributes. 
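// With the polling hook source and the ErrDisabled fail-open path gone,
// Admit fails closed: until both the namespace informer and the webhook
// configuration informer have synced, WaitForReady() below reports false and
// the request is rejected rather than admitted against an empty configuration.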
func (a *MutatingWebhook) Admit(attr admission.Attributes) error { - hookConfig, err := a.loadConfiguration(attr) - if err != nil { - return err + if !a.WaitForReady() { + return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request")) } + + hookConfig := a.loadConfiguration(attr) hooks := hookConfig.Webhooks ctx := context.TODO() diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission_test.go index 9f92fad1126..523e03762d0 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission_test.go @@ -47,16 +47,16 @@ type fakeHookSource struct { err error } -func (f *fakeHookSource) Webhooks() (*registrationv1beta1.MutatingWebhookConfiguration, error) { +func (f *fakeHookSource) Webhooks() *registrationv1beta1.MutatingWebhookConfiguration { if f.err != nil { - return nil, f.err + return nil } for i, h := range f.hooks { if h.NamespaceSelector == nil { f.hooks[i].NamespaceSelector = &metav1.LabelSelector{} } } - return ®istrationv1beta1.MutatingWebhookConfiguration{Webhooks: f.hooks}, nil + return ®istrationv1beta1.MutatingWebhookConfiguration{Webhooks: f.hooks} } func (f *fakeHookSource) Run(stopCh <-chan struct{}) {} diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index 4226a13912c..a3b6a155937 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -13,7 +13,6 @@ go_library( "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go index f14d65083de..8e8c7448fcb 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go @@ -30,7 +30,6 @@ import ( admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -67,7 +66,7 @@ func Register(plugins *admission.Plugins) { // WebhookSource can list dynamic webhook plugins. type WebhookSource interface { - Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) + Webhooks() *v1beta1.ValidatingWebhookConfiguration } // NewValidatingAdmissionWebhook returns a generic admission webhook plugin. 
@@ -145,8 +144,11 @@ func (a *ValidatingAdmissionWebhook) SetExternalKubeClientSet(client clientset.I func (a *ValidatingAdmissionWebhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { namespaceInformer := f.Core().V1().Namespaces() a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() - a.SetReadyFunc(namespaceInformer.Informer().HasSynced) - a.hookSource = configuration.NewValidatingWebhookConfigurationManager(f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations()) + validatingWebhookConfigurationsInformer := f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations() + a.hookSource = configuration.NewValidatingWebhookConfigurationManager(validatingWebhookConfigurationsInformer) + a.SetReadyFunc(func() bool { + return namespaceInformer.Informer().HasSynced() && validatingWebhookConfigurationsInformer.Informer().HasSynced() + }) } // ValidateInitialization implements the InitializationValidator interface. @@ -166,27 +168,16 @@ func (a *ValidatingAdmissionWebhook) ValidateInitialization() error { return nil } -func (a *ValidatingAdmissionWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.ValidatingWebhookConfiguration, error) { - hookConfig, err := a.hookSource.Webhooks() - if err != nil { - e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) - e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) - e.ErrStatus.Reason = "LoadingConfiguration" - e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{ - Type: "ValidatingWebhookConfigurationFailure", - Message: "An error has occurred while refreshing the ValidatingWebhook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.", - }) - return nil, e - } - return hookConfig, nil +func (a *ValidatingAdmissionWebhook) loadConfiguration(attr admission.Attributes) *v1beta1.ValidatingWebhookConfiguration { + return a.hookSource.Webhooks() } // Validate makes an admission decision based on the request attributes. 
func (a *ValidatingAdmissionWebhook) Validate(attr admission.Attributes) error { - hookConfig, err := a.loadConfiguration(attr) - if err != nil { - return err + if !a.WaitForReady() { + return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request")) } + hookConfig := a.loadConfiguration(attr) hooks := hookConfig.Webhooks ctx := context.TODO() diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission_test.go index 9a190f41239..77871f01942 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission_test.go @@ -47,16 +47,16 @@ type fakeHookSource struct { err error } -func (f *fakeHookSource) Webhooks() (*registrationv1beta1.ValidatingWebhookConfiguration, error) { +func (f *fakeHookSource) Webhooks() *registrationv1beta1.ValidatingWebhookConfiguration { if f.err != nil { - return nil, f.err + return nil } for i, h := range f.hooks { if h.NamespaceSelector == nil { f.hooks[i].NamespaceSelector = &metav1.LabelSelector{} } } - return ®istrationv1beta1.ValidatingWebhookConfiguration{Webhooks: f.hooks}, nil + return ®istrationv1beta1.ValidatingWebhookConfiguration{Webhooks: f.hooks} } func (f *fakeHookSource) Run(stopCh <-chan struct{}) {} From ea7a71301009fb3e0426ea93f070c27538e59f86 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 29 Nov 2017 23:28:53 +0800 Subject: [PATCH 12/53] run update bazel staging-dep --- staging/src/k8s.io/apiserver/Godeps/Godeps.json | 8 ++++++++ .../k8s.io/apiserver/pkg/admission/configuration/BUILD | 8 ++++++++ .../apiserver/pkg/admission/plugin/webhook/mutating/BUILD | 1 - .../pkg/admission/plugin/webhook/validating/BUILD | 1 - 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 18797d5b362..df8ae10e9f3 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -1790,6 +1790,10 @@ "ImportPath": "k8s.io/client-go/informers", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1beta1", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -1814,6 +1818,10 @@ "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1beta1", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/listers/core/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD index c892344c3e7..13a7afbd781 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD @@ -21,9 +21,12 @@ go_test( "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) @@ -42,8 +45,13 @@ go_library( "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD index 6ed322b2500..85bc608d792 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -18,7 +18,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index a3b6a155937..e4bf9ced2d6 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -16,7 +16,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", From 6e0a52e7ff7ba73557400a0a4b8a1c7209516ebe Mon Sep 17 00:00:00 2001 From: Derek Carr Date: Wed, 7 Feb 2018 17:01:18 -0500 Subject: [PATCH 13/53] Add node e2e to verify hugepages feature --- test/e2e_node/BUILD | 1 + test/e2e_node/hugepages_test.go | 248 ++++++++++++++++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 test/e2e_node/hugepages_test.go diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index c363c42ab0d..4de856da166 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -92,6 +92,7 @@ go_test( "eviction_test.go", "garbage_collector_test.go", "gke_environment_test.go", + "hugepages_test.go", "image_id_test.go", "kubelet_test.go", 
"lifecycle_hook_test.go", diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go new file mode 100644 index 00000000000..a8683dc3cda --- /dev/null +++ b/test/e2e_node/hugepages_test.go @@ -0,0 +1,248 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e_node + +import ( + "fmt" + "os/exec" + "path" + "strconv" + "strings" + "time" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb +func makePodToVerifyHugePages(cgroupName cm.CgroupName, hugePagesLimit resource.Quantity) *apiv1.Pod { + // convert the cgroup name to its literal form + cgroupFsName := "" + cgroupName = cm.CgroupName(path.Join(defaultNodeAllocatableCgroup, string(cgroupName))) + if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" { + cgroupFsName = cm.ConvertCgroupNameToSystemd(cgroupName, true) + } else { + cgroupFsName = string(cgroupName) + } + + // this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes + command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName) + framework.Logf("Pod to run command: %v", command) + pod := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod" + string(uuid.NewUUID()), + }, + Spec: apiv1.PodSpec{ + RestartPolicy: apiv1.RestartPolicyNever, + Containers: []apiv1.Container{ + { + Image: busyboxImage, + Name: "container" + string(uuid.NewUUID()), + Command: []string{"sh", "-c", command}, + VolumeMounts: []apiv1.VolumeMount{ + { + Name: "sysfscgroup", + MountPath: "/tmp", + }, + }, + }, + }, + Volumes: []apiv1.Volume{ + { + Name: "sysfscgroup", + VolumeSource: apiv1.VolumeSource{ + HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"}, + }, + }, + }, + }, + } + return pod +} + +// enableHugePagesInKubelet enables hugepages feature for kubelet +func enableHugePagesInKubelet(f *framework.Framework) *kubeletconfig.KubeletConfiguration { + oldCfg, err := getCurrentKubeletConfig() + framework.ExpectNoError(err) + newCfg := oldCfg.DeepCopy() + if newCfg.FeatureGates == nil { + newCfg.FeatureGates = make(map[string]bool) + newCfg.FeatureGates["HugePages"] = true + } + + // Update the Kubelet configuration. + framework.ExpectNoError(setKubeletConfiguration(f, newCfg)) + + // Wait for the Kubelet to be ready. 
+ Eventually(func() bool { + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + return len(nodeList.Items) == 1 + }, time.Minute, time.Second).Should(BeTrue()) + + return oldCfg +} + +// configureHugePages attempts to allocate 100Mi of 2Mi hugepages for testing purposes +func configureHugePages() error { + err := exec.Command("/bin/sh", "-c", "echo 50 > /proc/sys/vm/nr_hugepages").Run() + if err != nil { + return err + } + outData, err := exec.Command("/bin/sh", "-c", "cat /proc/meminfo | grep 'HugePages_Total' | awk '{print $2}'").Output() + if err != nil { + return err + } + numHugePages, err := strconv.Atoi(strings.TrimSpace(string(outData))) + if err != nil { + return err + } + framework.Logf("HugePages_Total is set to %v", numHugePages) + if numHugePages == 50 { + return nil + } + return fmt.Errorf("expected hugepages %v, but found %v", 50, numHugePages) +} + +// releaseHugePages releases all pre-allocated hugepages +func releaseHugePages() error { + return exec.Command("/bin/sh", "-c", "echo 0 > /proc/sys/vm/nr_hugepages").Run() +} + +// isHugePageSupported returns true if the default hugepagesize on host is 2Mi (i.e. 2048 kB) +func isHugePageSupported() bool { + outData, err := exec.Command("/bin/sh", "-c", "cat /proc/meminfo | grep 'Hugepagesize:' | awk '{print $2}'").Output() + framework.ExpectNoError(err) + pageSize, err := strconv.Atoi(strings.TrimSpace(string(outData))) + framework.ExpectNoError(err) + return pageSize == 2048 +} + +// pollResourceAsString polls for a specified resource and capacity from node +func pollResourceAsString(f *framework.Framework, resourceName string) string { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + amount := amountOfResourceAsString(node, resourceName) + framework.Logf("amount of %v: %v", resourceName, amount) + return amount +} + +// amountOfResourceAsString returns the amount of resourceName advertised by a node +func amountOfResourceAsString(node *apiv1.Node, resourceName string) string { + val, ok := node.Status.Capacity[apiv1.ResourceName(resourceName)] + if !ok { + return "" + } + return val.String() +} + +func runHugePagesTests(f *framework.Framework) { + It("should assign hugepages as expected based on the Pod spec", func() { + By("by running a G pod that requests hugepages") + pod := f.PodClient().Create(&apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod" + string(uuid.NewUUID()), + Namespace: f.Namespace.Name, + }, + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Image: framework.GetPauseImageName(f.ClientSet), + Name: "container" + string(uuid.NewUUID()), + Resources: apiv1.ResourceRequirements{ + Limits: apiv1.ResourceList{ + apiv1.ResourceName("cpu"): resource.MustParse("10m"), + apiv1.ResourceName("memory"): resource.MustParse("100Mi"), + apiv1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"), + }, + }, + }, + }, + }, + }) + podUID := string(pod.UID) + By("checking if the expected hugetlb settings were applied") + verifyPod := makePodToVerifyHugePages(cm.CgroupName("pod"+podUID), resource.MustParse("50Mi")) + f.PodClient().Create(verifyPod) + err := framework.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) + Expect(err).NotTo(HaveOccurred()) + }) +} + +// Serial because the test updates kubelet configuration. 
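+// It also writes to /proc/sys/vm/nr_hugepages and restarts the kubelet, so a
+// concurrently running test could observe the node flapping.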
+var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages]", func() { + f := framework.NewDefaultFramework("hugepages-test") + + Context("With config updated with hugepages feature enabled", func() { + var oldCfg *kubeletconfig.KubeletConfiguration + + BeforeEach(func() { + By("verifying hugepages are supported") + if !isHugePageSupported() { + framework.Skipf("skipping test because hugepages are not supported") + return + } + By("configuring the host to reserve a number of pre-allocated hugepages") + Eventually(func() error { + err := configureHugePages() + if err != nil { + return err + } + return nil + }, 30*time.Second, framework.Poll).Should(BeNil()) + By("enabling hugepages in kubelet") + oldCfg = enableHugePagesInKubelet(f) + By("restarting kubelet to pick up pre-allocated hugepages") + restartKubelet() + By("by waiting for hugepages resource to become available on the local node") + Eventually(func() string { + return pollResourceAsString(f, "hugepages-2Mi") + }, 30*time.Second, framework.Poll).Should(Equal("100Mi")) + }) + + runHugePagesTests(f) + + AfterEach(func() { + By("Releasing hugepages") + Eventually(func() error { + err := releaseHugePages() + if err != nil { + return err + } + return nil + }, 30*time.Second, framework.Poll).Should(BeNil()) + if oldCfg != nil { + By("Restoring old kubelet config") + setOldKubeletConfig(f, oldCfg) + } + By("restarting kubelet to release hugepages") + restartKubelet() + By("by waiting for hugepages resource to not appear available on the local node") + Eventually(func() string { + return pollResourceAsString(f, "hugepages-2Mi") + }, 30*time.Second, framework.Poll).Should(Equal("0")) + }) + }) +}) From e155582662f80a491d84428c902a4fc78b93f345 Mon Sep 17 00:00:00 2001 From: tossmilestone Date: Thu, 8 Feb 2018 10:25:49 +0800 Subject: [PATCH 14/53] Avoid race condition when updating equivalence cache. 
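The equivalence cache was previously read in podFitsOnNode and only written
back after all predicates had run, so an invalidation landing in between could
be overwritten with stale results. To close that window, PredicateWithECache
and UpdateCachedPredicateItem gain a needLock parameter and the caller holds
the cache lock across the whole read-compute-update sequence. A condensed
sketch of the resulting pattern (simplified from the generic_scheduler.go hunk
below; error handling elided):

	ecache.Lock()
	fit, reasons, invalid := ecache.PredicateWithECache(podName, nodeName, predicateKey, hash, false)
	if invalid {
		// Cache miss or invalidated entry: run the real predicate and
		// store the result while still holding the lock.
		fit, reasons, _ = predicate(pod, meta, nodeInfo)
		ecache.UpdateCachedPredicateItem(podName, nodeName, predicateKey, fit, reasons, hash, false)
	}
	ecache.Unlock()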
--- pkg/scheduler/core/equivalence_cache.go | 15 +++++++---- pkg/scheduler/core/equivalence_cache_test.go | 5 ++++ pkg/scheduler/core/generic_scheduler.go | 26 +++++++++----------- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/pkg/scheduler/core/equivalence_cache.go b/pkg/scheduler/core/equivalence_cache.go index cd44e66ed9f..fef04b94121 100644 --- a/pkg/scheduler/core/equivalence_cache.go +++ b/pkg/scheduler/core/equivalence_cache.go @@ -75,9 +75,12 @@ func (ec *EquivalenceCache) UpdateCachedPredicateItem( fit bool, reasons []algorithm.PredicateFailureReason, equivalenceHash uint64, + needLock bool, ) { - ec.Lock() - defer ec.Unlock() + if needLock { + ec.Lock() + defer ec.Unlock() + } if _, exist := ec.algorithmCache[nodeName]; !exist { ec.algorithmCache[nodeName] = newAlgorithmCache() } @@ -106,10 +109,12 @@ func (ec *EquivalenceCache) UpdateCachedPredicateItem( // based on cached predicate results func (ec *EquivalenceCache) PredicateWithECache( podName, nodeName, predicateKey string, - equivalenceHash uint64, + equivalenceHash uint64, needLock bool, ) (bool, []algorithm.PredicateFailureReason, bool) { - ec.RLock() - defer ec.RUnlock() + if needLock { + ec.RLock() + defer ec.RUnlock() + } glog.V(5).Infof("Begin to calculate predicate: %v for pod: %s on node: %s based on equivalence cache", predicateKey, podName, nodeName) if algorithmCache, exist := ec.algorithmCache[nodeName]; exist { diff --git a/pkg/scheduler/core/equivalence_cache_test.go b/pkg/scheduler/core/equivalence_cache_test.go index 35d7592761e..fcb2c9455a7 100644 --- a/pkg/scheduler/core/equivalence_cache_test.go +++ b/pkg/scheduler/core/equivalence_cache_test.go @@ -90,6 +90,7 @@ func TestUpdateCachedPredicateItem(t *testing.T) { test.fit, test.reasons, test.equivalenceHash, + true, ) value, ok := ecache.algorithmCache[test.nodeName].predicatesCache.Get(test.predicateKey) @@ -201,6 +202,7 @@ func TestPredicateWithECache(t *testing.T) { test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + true, ) // if we want to do invalid, invalid the cached item if test.expectedInvalidPredicateKey { @@ -213,6 +215,7 @@ func TestPredicateWithECache(t *testing.T) { test.nodeName, test.predicateKey, test.equivalenceHashForCalPredicate, + true, ) // returned invalid should match expectedInvalidPredicateKey or expectedInvalidEquivalenceHash if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate { @@ -564,6 +567,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + true, ) } @@ -632,6 +636,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + true, ) } diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 017d1505dc5..2afb4236632 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -108,7 +108,7 @@ type genericScheduler struct { // Schedule tries to schedule the given pod to one of node in the node list. // If it succeeds, it will return the name of the node. -// If it fails, it will return a Fiterror error with reasons. +// If it fails, it will return a FitError error with reasons. 
func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister) (string, error) { trace := utiltrace.New(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name)) defer trace.LogIfLong(100 * time.Millisecond) @@ -469,8 +469,11 @@ func podFitsOnNode( //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric if predicate, exist := predicateFuncs[predicateKey]; exist { if eCacheAvailable { + // Lock ecache here to avoid a race condition against cache invalidation invoked + // in event handlers. This race has existed despite locks in eCache implementation. + ecache.Lock() // PredicateWithECache will return its cached predicate results. - fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivCacheInfo.hash) + fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivCacheInfo.hash, false) } if !eCacheAvailable || invalid { @@ -488,8 +491,15 @@ func podFitsOnNode( } else { predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons} } + result := predicateResults[predicateKey] + ecache.UpdateCachedPredicateItem(pod.GetName(), info.Node().GetName(), predicateKey, result.Fit, result.FailReasons, equivCacheInfo.hash, false) } } + + if eCacheAvailable { + ecache.Unlock() + } + if !fit { // eCache is available and valid, and predicates result is unfit, record the fail reasons failedPredicates = append(failedPredicates, reasons...) @@ -503,18 +513,6 @@ func podFitsOnNode( } } - // TODO(bsalamat): This way of updating equiv. cache has a race condition against - // cache invalidations invoked in event handlers. This race has existed despite locks - // in eCache implementation. If cache is invalidated after a predicate is executed - // and before we update the cache, the updates should not be written to the cache. - if eCacheAvailable { - nodeName := info.Node().GetName() - for predKey, result := range predicateResults { - // update equivalence cache with newly computed fit & reasons - // TODO(resouer) should we do this in another thread? any race? - ecache.UpdateCachedPredicateItem(pod.GetName(), nodeName, predKey, result.Fit, result.FailReasons, equivCacheInfo.hash) - } - } return len(failedPredicates) == 0, failedPredicates, nil } From 9e85b526cb4814589dd8e0e9e5777c2ff82a3cb2 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sun, 10 Dec 2017 15:11:00 +0800 Subject: [PATCH 15/53] proxy service part changes --- pkg/proxy/service.go | 224 ++++++++++++++++ pkg/proxy/service_test.go | 531 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 755 insertions(+) create mode 100644 pkg/proxy/service.go create mode 100644 pkg/proxy/service_test.go diff --git a/pkg/proxy/service.go b/pkg/proxy/service.go new file mode 100644 index 00000000000..82407ab1970 --- /dev/null +++ b/pkg/proxy/service.go @@ -0,0 +1,224 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package proxy
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
+)
+
+// serviceChange contains all changes to services that happened since proxy rules were synced. For a single object,
+// changes are accumulated, i.e. previous is the state from before applying the changes and
+// current is the state after applying all of the changes.
+type serviceChange struct {
+	previous ServiceMap
+	current  ServiceMap
+}
+
+// ServiceChangeTracker carries state about uncommitted changes to an arbitrary number of
+// Services, keyed by their namespace and name.
+type ServiceChangeTracker struct {
+	// lock protects items.
+	lock sync.Mutex
+	// items maps a service to its serviceChange.
+	items map[types.NamespacedName]*serviceChange
+}
+
+// NewServiceChangeTracker initializes a ServiceChangeTracker
+func NewServiceChangeTracker() *ServiceChangeTracker {
+	return &ServiceChangeTracker{
+		items: make(map[types.NamespacedName]*serviceChange),
+	}
+}
+
+// Update updates the given service's change map based on the <previous, current> service pair. It returns true if
+// items changed, otherwise it returns false. Update can be used to add/update/delete items of ServiceChangeMap. For example,
+// Add item
+//   - pass <nil, service> as the <previous, current> pair.
+// Update item
+//   - pass <oldService, service> as the <previous, current> pair.
+// Delete item
+//   - pass <service, nil> as the <previous, current> pair.
+//
+// makeServicePort() returns a proxy.ServicePort based on the given Service and one of its ServicePorts. We inject
+// makeServicePort() so that the caller has a chance to initialize the proxy.ServicePort interface.
+func (sct *ServiceChangeTracker) Update(previous, current *api.Service, makeServicePort func(servicePort *api.ServicePort, service *api.Service) ServicePort) bool {
+	svc := current
+	if svc == nil {
+		svc = previous
+	}
+	// previous == nil && current == nil is unexpected, we should return false directly.
+	if svc == nil {
+		return false
+	}
+	namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
+
+	sct.lock.Lock()
+	defer sct.lock.Unlock()
+
+	change, exists := sct.items[namespacedName]
+	if !exists {
+		change = &serviceChange{}
+		change.previous = serviceToServiceMap(previous, makeServicePort)
+		sct.items[namespacedName] = change
+	}
+	change.current = serviceToServiceMap(current, makeServicePort)
+	// if change.previous is equal to change.current, it means no change
+	if reflect.DeepEqual(change.previous, change.current) {
+		delete(sct.items, namespacedName)
+	}
+	return len(sct.items) > 0
+}
+
+// UpdateServiceMapResult is the updated results after applying service changes.
+type UpdateServiceMapResult struct {
+	// HCServiceNodePorts is a map of Service names to node port numbers which indicate the health of that Service on this Node.
+	// The value (uint16) is the service's health check node port.
+	HCServiceNodePorts map[types.NamespacedName]uint16
+	// UDPStaleClusterIP holds stale (no longer assigned to a Service) Service IPs that had UDP ports.
+	// Callers can use this to abort timeout-waits or clear connection-tracking information.
+	UDPStaleClusterIP sets.String
+}
+
+// UpdateServiceMap updates ServiceMap based on the given changes.
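+// A rough usage sketch (the proxier sync loop that drives this lives outside
+// this patch, so the surrounding names below are illustrative only):
+//
+//	changes := NewServiceChangeTracker()
+//	// Accumulate informer events; <nil, svc> adds, <old, new> updates, <svc, nil> deletes.
+//	changes.Update(oldSvc, newSvc, makeServicePort)
+//	// Commit the accumulated changes to the proxier's ServiceMap.
+//	result := UpdateServiceMap(serviceMap, changes)
+//	// result.UDPStaleClusterIP can then be used to clear stale conntrack entries.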
+func UpdateServiceMap(serviceMap ServiceMap, changes *ServiceChangeTracker) (result UpdateServiceMapResult) {
+	result.UDPStaleClusterIP = sets.NewString()
+	serviceMap.apply(changes, result.UDPStaleClusterIP)
+
+	// TODO: If this turns out to be computationally expensive, consider
+	// computing it incrementally, similarly to serviceMap.
+	result.HCServiceNodePorts = make(map[types.NamespacedName]uint16)
+	for svcPortName, info := range serviceMap {
+		if info.HealthCheckNodePort() != 0 {
+			result.HCServiceNodePorts[svcPortName.NamespacedName] = uint16(info.HealthCheckNodePort())
+		}
+	}
+
+	return result
+}
+
+// ServiceMap maps a service to its ServicePort information.
+type ServiceMap map[ServicePortName]ServicePort
+
+// serviceToServiceMap translates a single Service object to a ServiceMap.
+// makeServicePort() returns a proxy.ServicePort based on the given Service and one of its ServicePorts. We inject
+// makeServicePort() so that the caller has a chance to initialize the proxy.ServicePort interface.
+//
+// NOTE: service object should NOT be modified.
+func serviceToServiceMap(service *api.Service, makeServicePort func(servicePort *api.ServicePort, service *api.Service) ServicePort) ServiceMap {
+	if service == nil {
+		return nil
+	}
+	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
+	if proxyutil.ShouldSkipService(svcName, service) {
+		return nil
+	}
+
+	serviceMap := make(ServiceMap)
+	for i := range service.Spec.Ports {
+		servicePort := &service.Spec.Ports[i]
+		svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
+		serviceMap[svcPortName] = makeServicePort(servicePort, service)
+	}
+	return serviceMap
+}
+
+// apply applies the changes to ServiceMap and updates the stale udp cluster IP set. The UDPStaleClusterIP argument is
+// used to collect the cluster IPs of udp protocol services that are deleted from the ServiceMap.
+func (serviceMap *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP sets.String) {
+	changes.lock.Lock()
+	defer changes.lock.Unlock()
+	for _, change := range changes.items {
+		serviceMap.merge(change.current)
+		// filter updated services out of change.previous before calling unmerge(), so that services which were
+		// updated (rather than removed) are not deleted.
+		change.previous.filter(change.current)
+		serviceMap.unmerge(change.previous, UDPStaleClusterIP)
+	}
+	// clear changes after applying them to ServiceMap.
+	changes.items = make(map[types.NamespacedName]*serviceChange)
+	return
+}
+
+// merge adds the other ServiceMap's elements to the current ServiceMap.
+// On collision, other ALWAYS wins; otherwise the element from other is simply added.
+// In other words, if an element of current collides with other, current is updated with the value from other.
+// It returns a string set holding the identifiers (ServicePortName.String()) of all newly merged services, which helps
+// callers tell whether a service was deleted or updated.
+// The returned value is one of the arguments of ServiceMap.unmerge().
+// Merging ServiceMap B into ServiceMap A does the following 2 things:
+// * updates ServiceMap A.
+// * produces a string set holding all of ServiceMap B's ServicePortName.String() identifiers.
+// For example,
+// - A{}
+// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
+//   - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
+//   - produce string set {"ns/cluster-ip:http"}
+// - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}}
+// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
+//   - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
+//   - produce string set {"ns/cluster-ip:http"}
+func (sm *ServiceMap) merge(other ServiceMap) sets.String {
+	// existingPorts is going to store all identifiers of all services in the `other` ServiceMap.
+	existingPorts := sets.NewString()
+	for svcPortName, info := range other {
+		// Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts.
+		existingPorts.Insert(svcPortName.String())
+		_, exists := (*sm)[svcPortName]
+		if !exists {
+			glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
+		} else {
+			glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
+		}
+		(*sm)[svcPortName] = info
+	}
+	return existingPorts
+}
+
+// filter removes from the current ServiceMap every element whose ServicePortName also exists in the given ServiceMap.
+func (sm *ServiceMap) filter(other ServiceMap) {
+	for svcPortName := range *sm {
+		// skip deleting the entry for an Update event.
+		if _, ok := other[svcPortName]; ok {
+			delete(*sm, svcPortName)
+		}
+	}
+}
+
+// unmerge deletes all of the other ServiceMap's elements from the current ServiceMap. The UDPStaleClusterIP string set
+// is passed in to collect the cluster IPs of stale udp services; stale udp connections are cleared based on it later.
+func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) {
+	for svcPortName := range other {
+		info, exists := (*sm)[svcPortName]
+		if exists {
+			glog.V(1).Infof("Removing service port %q", svcPortName)
+			if info.Protocol() == api.ProtocolUDP {
+				UDPStaleClusterIP.Insert(info.ClusterIP())
+			}
+			delete(*sm, svcPortName)
+		} else {
+			glog.Errorf("Service port %q doesn't exist", svcPortName)
+		}
+	}
+}
diff --git a/pkg/proxy/service_test.go b/pkg/proxy/service_test.go
new file mode 100644
index 00000000000..ed2ad7d556e
--- /dev/null
+++ b/pkg/proxy/service_test.go
@@ -0,0 +1,531 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	apiservice "k8s.io/kubernetes/pkg/api/service"
+	api "k8s.io/kubernetes/pkg/apis/core"
+)
+
+const testHostname = "test-hostname"
+
+// fake implementation for service info.
+type fakeServiceInfo struct { + clusterIP net.IP + port int + protocol api.Protocol + healthCheckNodePort int +} + +func (f *fakeServiceInfo) String() string { + return fmt.Sprintf("%s:%d/%s", f.clusterIP, f.port, f.protocol) +} + +func (f *fakeServiceInfo) ClusterIP() string { + return f.clusterIP.String() +} + +func (f *fakeServiceInfo) Protocol() api.Protocol { + return f.protocol +} + +func (f *fakeServiceInfo) HealthCheckNodePort() int { + return f.healthCheckNodePort +} + +func makeTestServiceInfo(clusterIP string, port int, protocol string, healthcheckNodePort int) *fakeServiceInfo { + info := &fakeServiceInfo{ + clusterIP: net.ParseIP(clusterIP), + port: port, + protocol: api.Protocol(protocol), + } + if healthcheckNodePort != 0 { + info.healthCheckNodePort = healthcheckNodePort + } + return info +} + +func newFakeServiceInfo(servicePort *api.ServicePort, service *api.Service) ServicePort { + info := &fakeServiceInfo{ + clusterIP: net.ParseIP(service.Spec.ClusterIP), + port: int(servicePort.Port), + protocol: servicePort.Protocol, + } + if apiservice.NeedsHealthCheck(service) { + p := service.Spec.HealthCheckNodePort + if p != 0 { + info.healthCheckNodePort = int(p) + } + } + return info +} + +func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service { + svc := &api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{}, + }, + Spec: api.ServiceSpec{}, + Status: api.ServiceStatus{}, + } + svcFunc(svc) + return svc +} + +func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, port, nodeport int32, targetPort int) []api.ServicePort { + svcPort := api.ServicePort{ + Name: name, + Protocol: protocol, + Port: port, + NodePort: nodeport, + TargetPort: intstr.FromInt(targetPort), + } + return append(array, svcPort) +} + +func makeNSN(namespace, name string) types.NamespacedName { + return types.NamespacedName{Namespace: namespace, Name: name} +} + +func makeServicePortName(ns, name, port string) ServicePortName { + return ServicePortName{ + NamespacedName: makeNSN(ns, name), + Port: port, + } +} + +func Test_serviceToServiceMap(t *testing.T) { + testCases := []struct { + service *api.Service + expected map[ServicePortName]*fakeServiceInfo + }{ + { + // Case[0]: nothing + service: nil, + expected: map[ServicePortName]*fakeServiceInfo{}, + }, + { + // Case[1]: headless service + service: makeTestService("ns2", "headless", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = api.ClusterIPNone + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0) + }), + expected: map[ServicePortName]*fakeServiceInfo{}, + }, + { + // Case[2]: headless service without port + service: makeTestService("ns2", "headless-without-port", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = api.ClusterIPNone + }), + expected: map[ServicePortName]*fakeServiceInfo{}, + }, + { + // Case[3]: cluster ip service + service: makeTestService("ns2", "cluster-ip", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = "172.16.55.4" + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0) + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0) + }), + expected: map[ServicePortName]*fakeServiceInfo{ + makeServicePortName("ns2", "cluster-ip", "p1"): makeTestServiceInfo("172.16.55.4", 1234, "UDP", 0), + makeServicePortName("ns2", "cluster-ip", "p2"): 
makeTestServiceInfo("172.16.55.4", 1235, "UDP", 0),
+			},
+		},
+		{
+			// Case[4]: nodeport service
+			service: makeTestService("ns2", "node-port", func(svc *api.Service) {
+				svc.Spec.Type = api.ServiceTypeNodePort
+				svc.Spec.ClusterIP = "172.16.55.10"
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
+			}),
+			expected: map[ServicePortName]*fakeServiceInfo{
+				makeServicePortName("ns2", "node-port", "port1"): makeTestServiceInfo("172.16.55.10", 345, "UDP", 0),
+				makeServicePortName("ns2", "node-port", "port2"): makeTestServiceInfo("172.16.55.10", 344, "TCP", 0),
+			},
+		},
+		{
+			// Case[5]: load balancer service
+			service: makeTestService("ns1", "load-balancer", func(svc *api.Service) {
+				svc.Spec.Type = api.ServiceTypeLoadBalancer
+				svc.Spec.ClusterIP = "172.16.55.11"
+				svc.Spec.LoadBalancerIP = "5.6.7.8"
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port3", "UDP", 8675, 30061, 7000)
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port4", "UDP", 8676, 30062, 7001)
+				svc.Status.LoadBalancer = api.LoadBalancerStatus{
+					Ingress: []api.LoadBalancerIngress{
+						{IP: "10.1.2.4"},
+					},
+				}
+			}),
+			expected: map[ServicePortName]*fakeServiceInfo{
+				makeServicePortName("ns1", "load-balancer", "port3"): makeTestServiceInfo("172.16.55.11", 8675, "UDP", 0),
+				makeServicePortName("ns1", "load-balancer", "port4"): makeTestServiceInfo("172.16.55.11", 8676, "UDP", 0),
+			},
+		},
+		{
+			// Case[6]: load balancer service with only local traffic policy
+			service: makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
+				svc.Spec.Type = api.ServiceTypeLoadBalancer
+				svc.Spec.ClusterIP = "172.16.55.12"
+				svc.Spec.LoadBalancerIP = "5.6.7.8"
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portx", "UDP", 8677, 30063, 7002)
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "porty", "UDP", 8678, 30064, 7003)
+				svc.Status.LoadBalancer = api.LoadBalancerStatus{
+					Ingress: []api.LoadBalancerIngress{
+						{IP: "10.1.2.3"},
+					},
+				}
+				svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
+				svc.Spec.HealthCheckNodePort = 345
+			}),
+			expected: map[ServicePortName]*fakeServiceInfo{
+				makeServicePortName("ns1", "only-local-load-balancer", "portx"): makeTestServiceInfo("172.16.55.12", 8677, "UDP", 345),
+				makeServicePortName("ns1", "only-local-load-balancer", "porty"): makeTestServiceInfo("172.16.55.12", 8678, "UDP", 345),
+			},
+		},
+		{
+			// Case[7]: external name service
+			service: makeTestService("ns2", "external-name", func(svc *api.Service) {
+				svc.Spec.Type = api.ServiceTypeExternalName
+				svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
+				svc.Spec.ExternalName = "foo2.bar.com"
+				svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portz", "UDP", 1235, 5321, 0)
+			}),
+			expected: map[ServicePortName]*fakeServiceInfo{},
+		},
+	}
+
+	for tci, tc := range testCases {
+		// outputs
+		newServices := serviceToServiceMap(tc.service, newFakeServiceInfo)
+
+		if len(newServices) != len(tc.expected) {
+			t.Errorf("[%d] expected %d new, got %d: %v", tci, len(tc.expected), len(newServices), spew.Sdump(newServices))
+		}
+		for x := range tc.expected {
+			svc := newServices[x].(*fakeServiceInfo)
+			if !reflect.DeepEqual(svc, tc.expected[x]) {
+				t.Errorf("[%d] expected new[%v] to be %v, got %v", tci, x, tc.expected[x], *svc)
+			}
+		}
+	}
+}
+
+type FakeProxier struct {
+	endpointsChanges *EndpointChangeTracker
+	serviceChanges   *ServiceChangeTracker
+	serviceMap       ServiceMap
+	endpointsMap     EndpointsMap
+	hostname         string
+} + +func newFakeProxier() *FakeProxier { + return &FakeProxier{ + serviceMap: make(ServiceMap), + serviceChanges: NewServiceChangeTracker(), + endpointsMap: make(EndpointsMap), + endpointsChanges: NewEndpointChangeTracker(testHostname), + } +} + +func makeServiceMap(fake *FakeProxier, allServices ...*api.Service) { + for i := range allServices { + fake.addService(allServices[i]) + } +} + +func (fake *FakeProxier) addService(service *api.Service) { + fake.serviceChanges.Update(nil, service, makeServicePort) +} + +func (fake *FakeProxier) updateService(oldService *api.Service, service *api.Service) { + fake.serviceChanges.Update(oldService, service, makeServicePort) +} + +func (fake *FakeProxier) deleteService(service *api.Service) { + fake.serviceChanges.Update(service, nil, makeServicePort) +} + +func makeServicePort(port *api.ServicePort, service *api.Service) ServicePort { + info := &fakeServiceInfo{ + clusterIP: net.ParseIP(service.Spec.ClusterIP), + port: int(port.Port), + protocol: port.Protocol, + } + if apiservice.NeedsHealthCheck(service) { + p := service.Spec.HealthCheckNodePort + if p != 0 { + info.healthCheckNodePort = int(p) + } + } + return info +} + +func TestUpdateServiceMapHeadless(t *testing.T) { + fp := newFakeProxier() + + makeServiceMap(fp, + makeTestService("ns2", "headless", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = api.ClusterIPNone + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0) + }), + makeTestService("ns2", "headless-without-port", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = api.ClusterIPNone + }), + ) + + // Headless service should be ignored + result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges) + if len(fp.serviceMap) != 0 { + t.Errorf("expected service map length 0, got %d", len(fp.serviceMap)) + } + + // No proxied services, so no healthchecks + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts)) + } + + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) + } +} + +func TestUpdateServiceTypeExternalName(t *testing.T) { + fp := newFakeProxier() + + makeServiceMap(fp, + makeTestService("ns2", "external-name", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeExternalName + svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored + svc.Spec.ExternalName = "foo2.bar.com" + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0) + }), + ) + + result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges) + if len(fp.serviceMap) != 0 { + t.Errorf("expected service map length 0, got %v", fp.serviceMap) + } + // No proxied services, so no healthchecks + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) + } + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP) + } +} + +func TestBuildServiceMapAddRemove(t *testing.T) { + fp := newFakeProxier() + + services := []*api.Service{ + makeTestService("ns2", "cluster-ip", func(svc *api.Service) { + svc.Spec.Type = api.ServiceTypeClusterIP + svc.Spec.ClusterIP = "172.16.55.4" + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 1234, 4321, 0) + svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "UDP", 1235, 5321, 0) + }), + makeTestService("ns2", "node-port", 
func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeNodePort
+ svc.Spec.ClusterIP = "172.16.55.10"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
+ }),
+ makeTestService("ns1", "load-balancer", func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeLoadBalancer
+ svc.Spec.ClusterIP = "172.16.55.11"
+ svc.Spec.LoadBalancerIP = "5.6.7.8"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
+ svc.Status.LoadBalancer = api.LoadBalancerStatus{
+ Ingress: []api.LoadBalancerIngress{
+ {IP: "10.1.2.4"},
+ },
+ }
+ }),
+ makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeLoadBalancer
+ svc.Spec.ClusterIP = "172.16.55.12"
+ svc.Spec.LoadBalancerIP = "5.6.7.8"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
+ svc.Status.LoadBalancer = api.LoadBalancerStatus{
+ Ingress: []api.LoadBalancerIngress{
+ {IP: "10.1.2.3"},
+ },
+ }
+ svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
+ svc.Spec.HealthCheckNodePort = 345
+ }),
+ }
+
+ for i := range services {
+ fp.addService(services[i])
+ }
+ result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 8 {
+ t.Errorf("expected service map length 8, got %v", fp.serviceMap)
+ }
+
+ // The only-local-loadbalancer ones get added
+ if len(result.HCServiceNodePorts) != 1 {
+ t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
+ } else {
+ nsn := makeNSN("ns1", "only-local-load-balancer")
+ if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
+ t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
+ }
+ }
+
+ if len(result.UDPStaleClusterIP) != 0 {
+ // Services only added, so nothing stale yet
+ t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
+ }
+
+ // Remove some stuff
+ // oneService is a modification of services[0] with its first port removed.
+ oneService := makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeClusterIP
+ svc.Spec.ClusterIP = "172.16.55.4"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
+ })
+
+ fp.updateService(services[0], oneService)
+ fp.deleteService(services[1])
+ fp.deleteService(services[2])
+ fp.deleteService(services[3])
+
+ result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 1 {
+ t.Errorf("expected service map length 1, got %v", fp.serviceMap)
+ }
+
+ if len(result.HCServiceNodePorts) != 0 {
+ t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
+ }
+
+ // All services but one were deleted. While you'd expect only the ClusterIPs
+ // from the three deleted services here, we still have the ClusterIP for
+ // the not-deleted service, because one of its ServicePorts was deleted.
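+ // Stale UDP ClusterIPs matter because removing a UDP port can leave
+ // conntrack entries behind that keep steering traffic to dead backends;
+ // the proxier uses this set to decide which conntrack entries to flush.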
+ expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
+ if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
+ t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
+ }
+ for _, ip := range expectedStaleUDPServices {
+ if !result.UDPStaleClusterIP.Has(ip) {
+ t.Errorf("expected stale UDP service %s", ip)
+ }
+ }
+}
+
+func TestBuildServiceMapServiceUpdate(t *testing.T) {
+ fp := newFakeProxier()
+
+ servicev1 := makeTestService("ns1", "svc1", func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeClusterIP
+ svc.Spec.ClusterIP = "172.16.55.4"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0)
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 0)
+ })
+ servicev2 := makeTestService("ns1", "svc1", func(svc *api.Service) {
+ svc.Spec.Type = api.ServiceTypeLoadBalancer
+ svc.Spec.ClusterIP = "172.16.55.4"
+ svc.Spec.LoadBalancerIP = "5.6.7.8"
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 7002)
+ svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 7003)
+ svc.Status.LoadBalancer = api.LoadBalancerStatus{
+ Ingress: []api.LoadBalancerIngress{
+ {IP: "10.1.2.3"},
+ },
+ }
+ svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
+ svc.Spec.HealthCheckNodePort = 345
+ })
+
+ fp.addService(servicev1)
+
+ result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 2 {
+ t.Errorf("expected service map length 2, got %v", fp.serviceMap)
+ }
+ if len(result.HCServiceNodePorts) != 0 {
+ t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
+ }
+ if len(result.UDPStaleClusterIP) != 0 {
+ // Services only added, so nothing stale yet
+ t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
+ }
+
+ // Change service to load-balancer
+ fp.updateService(servicev1, servicev2)
+ result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 2 {
+ t.Errorf("expected service map length 2, got %v", fp.serviceMap)
+ }
+ if len(result.HCServiceNodePorts) != 1 {
+ t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
+ }
+ if len(result.UDPStaleClusterIP) != 0 {
+ t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
+ }
+
+ // No change; make sure the service map stays the same and there are
+ // no health-check changes
+ fp.updateService(servicev2, servicev2)
+ result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 2 {
+ t.Errorf("expected service map length 2, got %v", fp.serviceMap)
+ }
+ if len(result.HCServiceNodePorts) != 1 {
+ t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
+ }
+ if len(result.UDPStaleClusterIP) != 0 {
+ t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
+ }
+
+ // And back to ClusterIP
+ fp.updateService(servicev2, servicev1)
+ result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
+ if len(fp.serviceMap) != 2 {
+ t.Errorf("expected service map length 2, got %v", fp.serviceMap)
+ }
+ if len(result.HCServiceNodePorts) != 0 {
+ t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
+ }
+ if len(result.UDPStaleClusterIP) != 0 {
+ // Service was only updated, not added, so nothing should be stale
+ t.Errorf("expected stale UDP services
length 0, got %d", len(result.UDPStaleClusterIP)) + } +} From 6edcf02d9eec9aba20370cd746bb347d3f3ebc49 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sun, 10 Dec 2017 15:11:35 +0800 Subject: [PATCH 16/53] proxy endpoints part changes --- pkg/proxy/endpoints.go | 243 +++++++ pkg/proxy/endpoints_test.go | 1247 +++++++++++++++++++++++++++++++++++ pkg/proxy/util/endpoints.go | 1 + 3 files changed, 1491 insertions(+) create mode 100644 pkg/proxy/endpoints.go create mode 100644 pkg/proxy/endpoints_test.go diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go new file mode 100644 index 00000000000..62f3aabbbd5 --- /dev/null +++ b/pkg/proxy/endpoints.go @@ -0,0 +1,243 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "reflect" + "sync" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of +// Endpoints, keyed by their namespace and name. +type EndpointChangeTracker struct { + // lock protects items. + lock sync.Mutex + // hostname is the host where kube-proxy is running. + hostname string + // items maps a service to is endpointsChange. + items map[types.NamespacedName]*endpointsChange +} + +// NewEndpointChangeTracker initializes an EndpointsChangeMap +func NewEndpointChangeTracker(hostname string) *EndpointChangeTracker { + return &EndpointChangeTracker{ + hostname: hostname, + items: make(map[types.NamespacedName]*endpointsChange), + } +} + +// Update updates given service's endpoints change map based on the endpoints pair. It returns true +// if items changed, otherwise return false. Update can be used to add/update/delete items of EndpointsChangeMap. For example, +// Add item +// - pass as the pair. +// Update item +// - pass as the pair. +// Delete item +// - pass as the pair. +func (ect *EndpointChangeTracker) Update(previous, current *api.Endpoints, makeEndpoints func(IP string, port int, isLocal bool) Endpoint) bool { + endpoints := current + if endpoints == nil { + endpoints = previous + } + // previous == nil && current == nil is unexpected, we should return false directly. + if endpoints == nil { + return false + } + namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} + + ect.lock.Lock() + defer ect.lock.Unlock() + + change, exists := ect.items[namespacedName] + if !exists { + change = &endpointsChange{} + change.previous = endpointsToEndpointsMap(previous, ect.hostname, makeEndpoints) + ect.items[namespacedName] = change + } + change.current = endpointsToEndpointsMap(current, ect.hostname, makeEndpoints) + // if change.previous equal to change.current, it means no change + if reflect.DeepEqual(change.previous, change.current) { + delete(ect.items, namespacedName) + } + return len(ect.items) > 0 +} + +// endpointsChange contains all changes to endpoints that happened since proxy rules were synced. 
For a single object,
+// changes are accumulated, i.e. previous is state from before applying the changes,
+// current is state after applying the changes.
+type endpointsChange struct {
+ previous EndpointsMap
+ current EndpointsMap
+}
+
+// UpdateEndpointMapResult holds the results of applying endpoints changes.
+type UpdateEndpointMapResult struct {
+ // HCEndpointsLocalIPSize maps an endpoints name to the number of its local IPs.
+ HCEndpointsLocalIPSize map[types.NamespacedName]int
+ // StaleEndpoints lists the endpoint/service pairs that became stale.
+ StaleEndpoints []ServiceEndpoint
+ // StaleServiceNames lists the service port names that became stale.
+ StaleServiceNames []ServicePortName
+}
+
+// UpdateEndpointsMap updates endpointsMap based on the given changes.
+func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracker) (result UpdateEndpointMapResult) {
+ result.StaleEndpoints = make([]ServiceEndpoint, 0)
+ result.StaleServiceNames = make([]ServicePortName, 0)
+
+ endpointsMap.apply(changes, &result.StaleEndpoints, &result.StaleServiceNames)
+
+ // TODO: If this turns out to be computationally expensive, consider
+ // computing it incrementally, similarly to endpointsMap.
+ result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int)
+ localIPs := GetLocalEndpointIPs(endpointsMap)
+ for nsn, ips := range localIPs {
+ result.HCEndpointsLocalIPSize[nsn] = len(ips)
+ }
+
+ return result
+}
+
+// EndpointsMap maps a service port name to a list of its endpoints.
+type EndpointsMap map[ServicePortName][]Endpoint
+
+// endpointsToEndpointsMap translates a single Endpoints object to an EndpointsMap.
+// This function is used for incremental updates of endpointsMap.
+//
+// NOTE: endpoints object should NOT be modified.
+func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string, makeEndpoints func(IP string, port int, isLocal bool) Endpoint) EndpointsMap {
+ if endpoints == nil {
+ return nil
+ }
+
+ endpointsMap := make(EndpointsMap)
+ // We need to build a map of portname -> all ip:ports for that
+ // portname. Explode Endpoints.Subsets[*] into this structure.
+ for i := range endpoints.Subsets {
+ ss := &endpoints.Subsets[i]
+ for i := range ss.Ports {
+ port := &ss.Ports[i]
+ if port.Port == 0 {
+ glog.Warningf("ignoring invalid endpoint port %s", port.Name)
+ continue
+ }
+ svcPortName := ServicePortName{
+ NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
+ Port: port.Name,
+ }
+ for i := range ss.Addresses {
+ addr := &ss.Addresses[i]
+ if addr.IP == "" {
+ glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
+ continue
+ }
+ isLocal := addr.NodeName != nil && *addr.NodeName == hostname
+ epInfo := makeEndpoints(addr.IP, int(port.Port), isLocal)
+ endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
+ }
+ if glog.V(3) {
+ newEPList := []string{}
+ for _, ep := range endpointsMap[svcPortName] {
+ newEPList = append(newEPList, ep.String())
+ }
+ glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
+ }
+ }
+ }
+ return endpointsMap
+}
+
+// apply applies the changes to EndpointsMap and updates stale endpoints and service-endpoint pairs. The `staleEndpoints` argument
+// is passed in to store the stale udp endpoints, and the `staleServiceNames` argument is passed in to store the stale udp services.
+// The changes map is cleared after applying them.
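+// In effect, for each tracked change the map first removes everything in
+// change.previous (Unmerge) and then adds everything in change.current (Merge),
+// so the map converges to the most recently observed state.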
+func (endpointsMap EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
+ if changes == nil {
+ return
+ }
+ changes.lock.Lock()
+ defer changes.lock.Unlock()
+ for _, change := range changes.items {
+ endpointsMap.Unmerge(change.previous)
+ endpointsMap.Merge(change.current)
+ detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
+ }
+ changes.items = make(map[types.NamespacedName]*endpointsChange)
+}
+
+// Merge ensures that the current EndpointsMap contains all <service, endpoints> pairs from the EndpointsMap passed in.
+func (em EndpointsMap) Merge(other EndpointsMap) {
+ for svcPortName := range other {
+ em[svcPortName] = other[svcPortName]
+ }
+}
+
+// Unmerge removes the <service, endpoints> pairs from the current EndpointsMap which are contained in the EndpointsMap passed in.
+func (em EndpointsMap) Unmerge(other EndpointsMap) {
+ for svcPortName := range other {
+ delete(em, svcPortName)
+ }
+}
+
+// GetLocalEndpointIPs returns the IPs of local endpoints; local means the endpoint is running on the same host as kube-proxy.
+func GetLocalEndpointIPs(endpointsMap EndpointsMap) map[types.NamespacedName]sets.String {
+ localIPs := make(map[types.NamespacedName]sets.String)
+ for svcPortName, epList := range endpointsMap {
+ for _, ep := range epList {
+ if ep.IsLocal() {
+ nsn := svcPortName.NamespacedName
+ if localIPs[nsn] == nil {
+ localIPs[nsn] = sets.NewString()
+ }
+ localIPs[nsn].Insert(ep.IP())
+ }
+ }
+ }
+ return localIPs
+}
+
+// detectStaleConnections modifies <staleEndpoints> and <staleServiceNames> with detected stale connections. <staleServiceNames>
+// is used to store stale udp services in order to clear udp conntrack later.
+func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
+ for svcPortName, epList := range oldEndpointsMap {
+ for _, ep := range epList {
+ stale := true
+ for i := range newEndpointsMap[svcPortName] {
+ if newEndpointsMap[svcPortName][i].Equal(ep) {
+ stale = false
+ break
+ }
+ }
+ if stale {
+ glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
+ *staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName})
+ }
+ }
+ }
+
+ for svcPortName, epList := range newEndpointsMap {
+ // For a udp service, if its backends change from 0 to non-0, there may exist a conntrack entry that could blackhole traffic to the service.
+ if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
+ *staleServiceNames = append(*staleServiceNames, svcPortName)
+ }
+ }
+}
diff --git a/pkg/proxy/endpoints_test.go b/pkg/proxy/endpoints_test.go
new file mode 100644
index 00000000000..76c68e47910
--- /dev/null
+++ b/pkg/proxy/endpoints_test.go
@@ -0,0 +1,1247 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package proxy + +import ( + "net" + "reflect" + "strconv" + "testing" + + "github.com/davecgh/go-spew/spew" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + api "k8s.io/kubernetes/pkg/apis/core" +) + +type fakeEndpointsInfo struct { + endpoint string + isLocal bool +} + +func newFakeEndpointsInfo(IP string, port int, isLocal bool) Endpoint { + return &fakeEndpointsInfo{ + endpoint: net.JoinHostPort(IP, strconv.Itoa(port)), + isLocal: isLocal, + } +} + +func (f *fakeEndpointsInfo) String() string { + return f.endpoint +} + +func (f *fakeEndpointsInfo) IsLocal() bool { + return f.isLocal +} + +func (f *fakeEndpointsInfo) IP() string { + // Must be IP:port + host, _, _ := net.SplitHostPort(f.endpoint) + return host +} + +func (f *fakeEndpointsInfo) Equal(other Endpoint) bool { + return f.String() == other.String() && + f.IsLocal() == other.IsLocal() && + f.IP() == other.IP() +} + +func (proxier *FakeProxier) addEndpoints(endpoints *api.Endpoints) { + proxier.endpointsChanges.Update(nil, endpoints, newFakeEndpointsInfo) +} + +func (proxier *FakeProxier) updateEndpoints(oldEndpoints, endpoints *api.Endpoints) { + proxier.endpointsChanges.Update(oldEndpoints, endpoints, newFakeEndpointsInfo) +} + +func (proxier *FakeProxier) deleteEndpoints(endpoints *api.Endpoints) { + proxier.endpointsChanges.Update(endpoints, nil, newFakeEndpointsInfo) +} + +func TestGetLocalEndpointIPs(t *testing.T) { + testCases := []struct { + endpointsMap EndpointsMap + expected map[types.NamespacedName]sets.String + }{{ + // Case[0]: nothing + endpointsMap: EndpointsMap{}, + expected: map[types.NamespacedName]sets.String{}, + }, { + // Case[1]: unnamed port + endpointsMap: EndpointsMap{ + makeServicePortName("ns1", "ep1", ""): []Endpoint{ + &fakeEndpointsInfo{endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expected: map[types.NamespacedName]sets.String{}, + }, { + // Case[2]: unnamed port local + endpointsMap: EndpointsMap{ + makeServicePortName("ns1", "ep1", ""): []Endpoint{ + &fakeEndpointsInfo{endpoint: "1.1.1.1:11", isLocal: true}, + }, + }, + expected: map[types.NamespacedName]sets.String{ + {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.1"), + }, + }, { + // Case[3]: named local and non-local ports for the same IP. + endpointsMap: EndpointsMap{ + makeServicePortName("ns1", "ep1", "p11"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "1.1.1.1:11", isLocal: false}, + &fakeEndpointsInfo{endpoint: "1.1.1.2:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "1.1.1.1:12", isLocal: false}, + &fakeEndpointsInfo{endpoint: "1.1.1.2:12", isLocal: true}, + }, + }, + expected: map[types.NamespacedName]sets.String{ + {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.2"), + }, + }, { + // Case[4]: named local and non-local ports for different IPs. 
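+ // Only the isLocal addresses should be returned, grouped per NamespacedName.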
+ endpointsMap: EndpointsMap{ + makeServicePortName("ns1", "ep1", "p11"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns2", "ep2", "p22"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "2.2.2.2:22", isLocal: true}, + &fakeEndpointsInfo{endpoint: "2.2.2.22:22", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p23"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "2.2.2.3:23", isLocal: true}, + }, + makeServicePortName("ns4", "ep4", "p44"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "4.4.4.4:44", isLocal: true}, + &fakeEndpointsInfo{endpoint: "4.4.4.5:44", isLocal: false}, + }, + makeServicePortName("ns4", "ep4", "p45"): []Endpoint{ + &fakeEndpointsInfo{endpoint: "4.4.4.6:45", isLocal: true}, + }, + }, + expected: map[types.NamespacedName]sets.String{ + {Namespace: "ns2", Name: "ep2"}: sets.NewString("2.2.2.2", "2.2.2.22", "2.2.2.3"), + {Namespace: "ns4", Name: "ep4"}: sets.NewString("4.4.4.4", "4.4.4.6"), + }, + }} + + for tci, tc := range testCases { + // outputs + localIPs := GetLocalEndpointIPs(tc.endpointsMap) + + if !reflect.DeepEqual(localIPs, tc.expected) { + t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs) + } + } +} + +func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) *api.Endpoints { + ept := &api.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + eptFunc(ept) + return ept +} + +// This is a coarse test, but it offers some modicum of confidence as the code is evolved. +func Test_endpointsToEndpointsMap(t *testing.T) { + testCases := []struct { + newEndpoints *api.Endpoints + expected map[ServicePortName][]*fakeEndpointsInfo + }{{ + // Case[0]: nothing + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), + expected: map[ServicePortName][]*fakeEndpointsInfo{}, + }, { + // Case[1]: no changes, unnamed port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "", + Port: 11, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + }, { + // Case[2]: no changes, named port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "port", + Port: 11, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "port"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + }, { + // Case[3]: new port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Port: 11, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + }, { + // Case[4]: remove port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), + expected: map[ServicePortName][]*fakeEndpointsInfo{}, + }, { + // Case[5]: new IP and port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + 
IP: "1.1.1.1", + }, { + IP: "2.2.2.2", + }}, + Ports: []api.EndpointPort{{ + Name: "p1", + Port: 11, + }, { + Name: "p2", + Port: 22, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p1"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "2.2.2.2:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p2"): { + {endpoint: "1.1.1.1:22", isLocal: false}, + {endpoint: "2.2.2.2:22", isLocal: false}, + }, + }, + }, { + // Case[6]: remove IP and port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p1", + Port: 11, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p1"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + }, { + // Case[7]: rename port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p2", + Port: 11, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p2"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + }, { + // Case[8]: renumber port + newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p1", + Port: 22, + }}, + }, + } + }), + expected: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p1"): { + {endpoint: "1.1.1.1:22", isLocal: false}, + }, + }, + }} + + for tci, tc := range testCases { + // outputs + newEndpoints := endpointsToEndpointsMap(tc.newEndpoints, "host", newFakeEndpointsInfo) + + if len(newEndpoints) != len(tc.expected) { + t.Errorf("[%d] expected %d new, got %d: %v", tci, len(tc.expected), len(newEndpoints), spew.Sdump(newEndpoints)) + } + for x := range tc.expected { + if len(newEndpoints[x]) != len(tc.expected[x]) { + t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(tc.expected[x]), x, len(newEndpoints[x])) + } else { + for i := range newEndpoints[x] { + ep := newEndpoints[x][i].(*fakeEndpointsInfo) + if *ep != *(tc.expected[x][i]) { + t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, tc.expected[x][i], *ep) + } + } + } + } + } +} + +func TestUpdateEndpointsMap(t *testing.T) { + var nodeName = testHostname + + emptyEndpoint := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{} + } + unnamedPort := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Port: 11, + }}, + }} + } + unnamedPortLocal := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Port: 11, + }}, + }} + } + namedPortLocal := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }} + } + namedPort := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: 
[]api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }} + } + namedPortRenamed := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p11-2", + Port: 11, + }}, + }} + } + namedPortRenumbered := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 22, + }}, + }} + } + namedPortsLocalNoLocal := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }, { + IP: "1.1.1.2", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }, { + Name: "p12", + Port: 12, + }}, + }} + } + multipleSubsets := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.2", + }}, + Ports: []api.EndpointPort{{ + Name: "p12", + Port: 12, + }}, + }} + } + multipleSubsetsWithLocal := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.2", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p12", + Port: 12, + }}, + }} + } + multipleSubsetsMultiplePortsLocal := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }, { + Name: "p12", + Port: 12, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.3", + }}, + Ports: []api.EndpointPort{{ + Name: "p13", + Port: 13, + }}, + }} + } + multipleSubsetsIPsPorts1 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }, { + IP: "1.1.1.2", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }, { + Name: "p12", + Port: 12, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.3", + }, { + IP: "1.1.1.4", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p13", + Port: 13, + }, { + Name: "p14", + Port: 14, + }}, + }} + } + multipleSubsetsIPsPorts2 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "2.2.2.1", + }, { + IP: "2.2.2.2", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p21", + Port: 21, + }, { + Name: "p22", + Port: 22, + }}, + }} + } + complexBefore1 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }} + } + complexBefore2 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "2.2.2.2", + NodeName: &nodeName, + }, { + IP: "2.2.2.22", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p22", + Port: 22, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "2.2.2.3", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p23", + Port: 23, + }}, + }} + } + complexBefore4 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + 
Addresses: []api.EndpointAddress{{ + IP: "4.4.4.4", + NodeName: &nodeName, + }, { + IP: "4.4.4.5", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p44", + Port: 44, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "4.4.4.6", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p45", + Port: 45, + }}, + }} + } + complexAfter1 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.1", + }, { + IP: "1.1.1.11", + }}, + Ports: []api.EndpointPort{{ + Name: "p11", + Port: 11, + }}, + }, { + Addresses: []api.EndpointAddress{{ + IP: "1.1.1.2", + }}, + Ports: []api.EndpointPort{{ + Name: "p12", + Port: 12, + }, { + Name: "p122", + Port: 122, + }}, + }} + } + complexAfter3 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "3.3.3.3", + }}, + Ports: []api.EndpointPort{{ + Name: "p33", + Port: 33, + }}, + }} + } + complexAfter4 := func(ept *api.Endpoints) { + ept.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{ + IP: "4.4.4.4", + NodeName: &nodeName, + }}, + Ports: []api.EndpointPort{{ + Name: "p44", + Port: 44, + }}, + }} + } + + testCases := []struct { + // previousEndpoints and currentEndpoints are used to call appropriate + // handlers OnEndpoints* (based on whether corresponding values are nil + // or non-nil) and must be of equal length. + previousEndpoints []*api.Endpoints + currentEndpoints []*api.Endpoints + oldEndpoints map[ServicePortName][]*fakeEndpointsInfo + expectedResult map[ServicePortName][]*fakeEndpointsInfo + expectedStaleEndpoints []ServiceEndpoint + expectedStaleServiceNames map[ServicePortName]bool + expectedHealthchecks map[types.NamespacedName]int + }{{ + // Case[0]: nothing + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{}, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{}, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[1]: no change, unnamed port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPort), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPort), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[2]: no change, named port, local + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortLocal), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortLocal), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, + }, { + // Case[3]: no 
change, multiple subsets + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsets), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsets), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.2:12", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.2:12", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[4]: no change, multiple subsets, multiple ports, local + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p13"): { + {endpoint: "1.1.1.3:13", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p13"): { + {endpoint: "1.1.1.3:13", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, + }, { + // Case[5]: no change, multiple endpoints, subsets, IPs, and ports + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1), + makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1), + makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p13"): { + {endpoint: "1.1.1.3:13", isLocal: false}, + {endpoint: "1.1.1.4:13", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p14"): { + {endpoint: "1.1.1.3:14", isLocal: false}, + {endpoint: "1.1.1.4:14", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p21"): { + {endpoint: "2.2.2.1:21", isLocal: false}, + {endpoint: "2.2.2.2:21", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p22"): { + {endpoint: "2.2.2.1:22", isLocal: false}, + {endpoint: "2.2.2.2:22", isLocal: true}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: 
true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p13"): { + {endpoint: "1.1.1.3:13", isLocal: false}, + {endpoint: "1.1.1.4:13", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p14"): { + {endpoint: "1.1.1.3:14", isLocal: false}, + {endpoint: "1.1.1.4:14", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p21"): { + {endpoint: "2.2.2.1:21", isLocal: false}, + {endpoint: "2.2.2.2:21", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p22"): { + {endpoint: "2.2.2.1:22", isLocal: false}, + {endpoint: "2.2.2.2:22", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 2, + makeNSN("ns2", "ep2"): 1, + }, + }, { + // Case[6]: add an Endpoints + previousEndpoints: []*api.Endpoints{ + nil, + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPortLocal), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{}, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", ""): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, + }, { + // Case[7]: remove an Endpoints + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPortLocal), + }, + currentEndpoints: []*api.Endpoints{ + nil, + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: true}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{}, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", ""), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[8]: add an IP and port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPort), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, + }, { + // Case[9]: remove an IP and port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPort), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: 
"1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "1.1.1.2:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), + }, { + Endpoint: "1.1.1.1:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), + }, { + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[10]: add a subset + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPort), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.2:12", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns1", "ep1"): 1, + }, + }, { + // Case[11]: remove a subset + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", multipleSubsets), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPort), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.2:12", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[12]: rename a port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPort), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortRenamed), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11-2"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p11-2"): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[13]: renumber a port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", 
namedPort), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", namedPortRenumbered), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:22", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{}, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, { + // Case[14]: complex add and remove + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", complexBefore1), + makeTestEndpoints("ns2", "ep2", complexBefore2), + nil, + makeTestEndpoints("ns4", "ep4", complexBefore4), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", complexAfter1), + nil, + makeTestEndpoints("ns3", "ep3", complexAfter3), + makeTestEndpoints("ns4", "ep4", complexAfter4), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + makeServicePortName("ns2", "ep2", "p22"): { + {endpoint: "2.2.2.2:22", isLocal: true}, + {endpoint: "2.2.2.22:22", isLocal: true}, + }, + makeServicePortName("ns2", "ep2", "p23"): { + {endpoint: "2.2.2.3:23", isLocal: true}, + }, + makeServicePortName("ns4", "ep4", "p44"): { + {endpoint: "4.4.4.4:44", isLocal: true}, + {endpoint: "4.4.4.5:44", isLocal: true}, + }, + makeServicePortName("ns4", "ep4", "p45"): { + {endpoint: "4.4.4.6:45", isLocal: true}, + }, + }, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + makeServicePortName("ns1", "ep1", "p11"): { + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.11:11", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p12"): { + {endpoint: "1.1.1.2:12", isLocal: false}, + }, + makeServicePortName("ns1", "ep1", "p122"): { + {endpoint: "1.1.1.2:122", isLocal: false}, + }, + makeServicePortName("ns3", "ep3", "p33"): { + {endpoint: "3.3.3.3:33", isLocal: false}, + }, + makeServicePortName("ns4", "ep4", "p44"): { + {endpoint: "4.4.4.4:44", isLocal: true}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{{ + Endpoint: "2.2.2.2:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), + }, { + Endpoint: "2.2.2.22:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), + }, { + Endpoint: "2.2.2.3:23", + ServicePortName: makeServicePortName("ns2", "ep2", "p23"), + }, { + Endpoint: "4.4.4.5:44", + ServicePortName: makeServicePortName("ns4", "ep4", "p44"), + }, { + Endpoint: "4.4.4.6:45", + ServicePortName: makeServicePortName("ns4", "ep4", "p45"), + }}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", "p12"): true, + makeServicePortName("ns1", "ep1", "p122"): true, + makeServicePortName("ns3", "ep3", "p33"): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{ + makeNSN("ns4", "ep4"): 1, + }, + }, { + // Case[15]: change from 0 endpoint address to 1 unnamed port + previousEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", emptyEndpoint), + }, + currentEndpoints: []*api.Endpoints{ + makeTestEndpoints("ns1", "ep1", unnamedPort), + }, + oldEndpoints: map[ServicePortName][]*fakeEndpointsInfo{}, + expectedResult: map[ServicePortName][]*fakeEndpointsInfo{ + 
makeServicePortName("ns1", "ep1", ""): { + {endpoint: "1.1.1.1:11", isLocal: false}, + }, + }, + expectedStaleEndpoints: []ServiceEndpoint{}, + expectedStaleServiceNames: map[ServicePortName]bool{ + makeServicePortName("ns1", "ep1", ""): true, + }, + expectedHealthchecks: map[types.NamespacedName]int{}, + }, + } + + for tci, tc := range testCases { + fp := newFakeProxier() + fp.hostname = nodeName + + // First check that after adding all previous versions of endpoints, + // the fp.oldEndpoints is as we expect. + for i := range tc.previousEndpoints { + if tc.previousEndpoints[i] != nil { + fp.addEndpoints(tc.previousEndpoints[i]) + } + } + UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) + + // Now let's call appropriate handlers to get to state we want to be. + if len(tc.previousEndpoints) != len(tc.currentEndpoints) { + t.Fatalf("[%d] different lengths of previous and current endpoints", tci) + continue + } + + for i := range tc.previousEndpoints { + prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i] + switch { + case prev == nil: + fp.addEndpoints(curr) + case curr == nil: + fp.deleteEndpoints(prev) + default: + fp.updateEndpoints(prev, curr) + } + } + result := UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) + newMap := fp.endpointsMap + compareEndpointsMaps(t, tci, newMap, tc.expectedResult) + if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { + t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints) + } + for _, x := range tc.expectedStaleEndpoints { + found := false + for _, stale := range result.StaleEndpoints { + if stale == x { + found = true + break + } + } + if !found { + t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints) + } + } + if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) { + t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames) + } + for svcName := range tc.expectedStaleServiceNames { + found := false + for _, stale := range result.StaleServiceNames { + if stale == svcName { + found = true + } + } + if !found { + t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames) + } + } + if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) { + t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize) + } + } +} + +func compareEndpointsMaps(t *testing.T, tci int, newMap EndpointsMap, expected map[ServicePortName][]*fakeEndpointsInfo) { + if len(newMap) != len(expected) { + t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) + } + for x := range expected { + if len(newMap[x]) != len(expected[x]) { + t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) + } else { + for i := range expected[x] { + newEp, ok := newMap[x][i].(*fakeEndpointsInfo) + if !ok { + t.Errorf("Failed to cast endpointsInfo") + continue + } + if *newEp != *(expected[x][i]) { + t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp) + } + } + } + } +} diff --git a/pkg/proxy/util/endpoints.go b/pkg/proxy/util/endpoints.go index 449e112619d..7c91b9cd6a1 100644 --- a/pkg/proxy/util/endpoints.go +++ 
b/pkg/proxy/util/endpoints.go @@ -47,6 +47,7 @@ func IPPart(s string) string { return "" } +// PortPart returns just the port part of an endpoint string. func PortPart(s string) (int, error) { // Must be IP:port _, port, err := net.SplitHostPort(s) From f3512cbbb93bba43773da64f09f4f9296fa0936c Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sun, 10 Dec 2017 15:12:23 +0800 Subject: [PATCH 17/53] iptables proxier part changes --- pkg/proxy/iptables/proxier.go | 471 ++++++-------------------- pkg/proxy/iptables/proxier_test.go | 519 ++++++++--------------------- 2 files changed, 240 insertions(+), 750 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 6ce3199dd3c..f46ed98e04f 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -26,7 +26,6 @@ import ( "encoding/base32" "fmt" "net" - "reflect" "strconv" "strings" "sync" @@ -37,7 +36,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/service" @@ -157,37 +155,8 @@ type serviceInfo struct { serviceLBChainName utiliptables.Chain } -// internal struct for endpoints information -type endpointsInfo struct { - endpoint string // TODO: should be an endpointString type - isLocal bool - // The following fields we lazily compute and store here for performance - // reasons. If the protocol is the same as you expect it to be, then the - // chainName can be reused, otherwise it should be recomputed. - protocol string - chainName utiliptables.Chain -} - -// IPPart returns just the IP part of the endpoint. -func (e *endpointsInfo) IPPart() string { - return utilproxy.IPPart(e.endpoint) -} - -// Returns the endpoint chain name for a given endpointsInfo. -func (e *endpointsInfo) endpointChain(svcNameString, protocol string) utiliptables.Chain { - if e.protocol != protocol { - e.protocol = protocol - e.chainName = servicePortEndpointChainName(svcNameString, protocol, e.endpoint) - } - return e.chainName -} - -func (e *endpointsInfo) String() string { - return fmt.Sprintf("%v", *e) -} - -// returns a new serviceInfo struct -func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { +// returns a new proxy.ServicePort which abstracts a serviceInfo +func newServiceInfo(port *api.ServicePort, service *api.Service) proxy.ServicePort { onlyNodeLocalEndpoints := false if apiservice.RequestsOnlyLocalTraffic(service) { onlyNodeLocalEndpoints = true @@ -214,10 +183,13 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges) copy(info.externalIPs, service.Spec.ExternalIPs) + svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} + svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: port.Name} + if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) + glog.Errorf("Service %q has no healthcheck nodeport", svcName.String()) } else { info.healthCheckNodePort = int(p) } @@ -233,134 +205,90 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se return info } -type endpointsChange struct { - previous proxyEndpointsMap - current proxyEndpointsMap +// ClusterIP is part of proxy.ServicePort interface. 
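+// It returns the cluster IP as a string; the proxier writes this value
+// directly into the iptables rules it generates.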
+func (info *serviceInfo) ClusterIP() string {
+	return info.clusterIP.String()
 }
 
-type endpointsChangeMap struct {
-	lock     sync.Mutex
-	hostname string
-	items    map[types.NamespacedName]*endpointsChange
+// Port is part of proxy.ServicePort interface.
+func (info *serviceInfo) Port() int {
+	return info.port
 }
 
-type serviceChange struct {
-	previous proxyServiceMap
-	current  proxyServiceMap
+// Protocol is part of proxy.ServicePort interface.
+func (info *serviceInfo) Protocol() api.Protocol {
+	return info.protocol
 }
 
-type serviceChangeMap struct {
-	lock  sync.Mutex
-	items map[types.NamespacedName]*serviceChange
+// String is part of proxy.ServicePort interface.
+func (info *serviceInfo) String() string {
+	return fmt.Sprintf("%s:%d/%s", info.clusterIP, info.port, info.protocol)
 }
 
-type updateEndpointMapResult struct {
-	hcEndpoints       map[types.NamespacedName]int
-	staleEndpoints    map[endpointServicePair]bool
-	staleServiceNames map[proxy.ServicePortName]bool
+// HealthCheckNodePort is part of proxy.ServicePort interface.
+func (info *serviceInfo) HealthCheckNodePort() int {
+	return info.healthCheckNodePort
 }
 
-type updateServiceMapResult struct {
-	hcServices    map[types.NamespacedName]uint16
-	staleServices sets.String
+var _ proxy.ServicePort = &serviceInfo{}
+
+// internal struct for endpoints information
+type endpointsInfo struct {
+	endpoint string // TODO: should be an endpointString type
+	isLocal  bool
+	// The following fields we lazily compute and store here for performance
+	// reasons. If the protocol is the same as you expect it to be, then the
+	// chainName can be reused, otherwise it should be recomputed.
+	protocol  string
+	chainName utiliptables.Chain
 }
 
-type proxyServiceMap map[proxy.ServicePortName]*serviceInfo
-type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo
-
-func newEndpointsChangeMap(hostname string) endpointsChangeMap {
-	return endpointsChangeMap{
-		hostname: hostname,
-		items:    make(map[types.NamespacedName]*endpointsChange),
+// returns a new proxy.Endpoint which abstracts an endpointsInfo
+func newEndpointsInfo(IP string, port int, isLocal bool) proxy.Endpoint {
+	return &endpointsInfo{
+		endpoint: net.JoinHostPort(IP, strconv.Itoa(port)),
+		isLocal:  isLocal,
 	}
 }
 
-func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool {
-	ecm.lock.Lock()
-	defer ecm.lock.Unlock()
-
-	change, exists := ecm.items[*namespacedName]
-	if !exists {
-		change = &endpointsChange{}
-		change.previous = endpointsToEndpointsMap(previous, ecm.hostname)
-		ecm.items[*namespacedName] = change
-	}
-	change.current = endpointsToEndpointsMap(current, ecm.hostname)
-	if reflect.DeepEqual(change.previous, change.current) {
-		delete(ecm.items, *namespacedName)
-	}
-	return len(ecm.items) > 0
+// IsLocal is part of proxy.Endpoint interface.
+func (e *endpointsInfo) IsLocal() bool {
+	return e.isLocal
 }
 
-func newServiceChangeMap() serviceChangeMap {
-	return serviceChangeMap{
-		items: make(map[types.NamespacedName]*serviceChange),
-	}
+// IP is part of proxy.Endpoint interface.
+func (e *endpointsInfo) IP() string { + return utilproxy.IPPart(e.endpoint) } -func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool { - scm.lock.Lock() - defer scm.lock.Unlock() - - change, exists := scm.items[*namespacedName] - if !exists { - change = &serviceChange{} - change.previous = serviceToServiceMap(previous) - scm.items[*namespacedName] = change +// Equal is part of proxy.Endpoint interface. +func (e *endpointsInfo) Equal(other proxy.Endpoint) bool { + o, ok := other.(*endpointsInfo) + if !ok { + glog.Errorf("Failed to cast endpointsInfo") + return false } - change.current = serviceToServiceMap(current) - if reflect.DeepEqual(change.previous, change.current) { - delete(scm.items, *namespacedName) - } - return len(scm.items) > 0 + return e.endpoint == o.endpoint && + e.isLocal == o.isLocal && + e.protocol == o.protocol && + e.chainName == o.chainName } -func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String { - existingPorts := sets.NewString() - for svcPortName, info := range other { - port := strconv.Itoa(info.port) - clusterIPPort := net.JoinHostPort(info.clusterIP.String(), port) - existingPorts.Insert(svcPortName.Port) - _, exists := (*sm)[svcPortName] - if !exists { - glog.V(1).Infof("Adding new service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol) - } else { - glog.V(1).Infof("Updating existing service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol) - } - (*sm)[svcPortName] = info - } - return existingPorts +// String is part of proxy.Endpoint interface. +func (e *endpointsInfo) String() string { + return e.endpoint } -func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String) { - for svcPortName := range other { - if existingPorts.Has(svcPortName.Port) { - continue - } - info, exists := (*sm)[svcPortName] - if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) - if info.protocol == api.ProtocolUDP { - staleServices.Insert(info.clusterIP.String()) - } - delete(*sm, svcPortName) - } else { - glog.Errorf("Service port %q removed, but doesn't exists", svcPortName) - } +// Returns the endpoint chain name for a given endpointsInfo. +func (e *endpointsInfo) endpointChain(svcNameString, protocol string) utiliptables.Chain { + if e.protocol != protocol { + e.protocol = protocol + e.chainName = servicePortEndpointChainName(svcNameString, protocol, e.endpoint) } + return e.chainName } -func (em proxyEndpointsMap) merge(other proxyEndpointsMap) { - for svcPortName := range other { - em[svcPortName] = other[svcPortName] - } -} - -func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) { - for svcPortName := range other { - delete(em, svcPortName) - } -} +var _ proxy.Endpoint = &endpointsInfo{} // Proxier is an iptables based proxy for connections between a localhost:lport // and services that provide the actual backends. @@ -369,12 +297,12 @@ type Proxier struct { // services that happened since iptables was synced. For a single object, // changes are accumulated, i.e. previous is state from before all of them, // current is state after applying all of those. 
- endpointsChanges endpointsChangeMap - serviceChanges serviceChangeMap + endpointsChanges *proxy.EndpointChangeTracker + serviceChanges *proxy.ServiceChangeTracker mu sync.Mutex // protects the following fields - serviceMap proxyServiceMap - endpointsMap proxyEndpointsMap + serviceMap proxy.ServiceMap + endpointsMap proxy.EndpointsMap portsMap map[utilproxy.LocalPort]utilproxy.Closeable // endpointsSynced and servicesSynced are set to true when corresponding // objects are synced after startup. This is used to avoid updating iptables @@ -469,10 +397,10 @@ func NewProxier(ipt utiliptables.Interface, proxier := &Proxier{ portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(hostname), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(hostname), iptables: ipt, masqueradeAll: masqueradeAll, masqueradeMark: masqueradeMark, @@ -660,22 +588,19 @@ func (proxier *Proxier) isInitialized() bool { } func (proxier *Proxier) OnServiceAdd(service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() { + if proxier.serviceChanges.Update(nil, service, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() { + if proxier.serviceChanges.Update(oldService, service, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } func (proxier *Proxier) OnServiceDelete(service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, service, nil) && proxier.isInitialized() { + if proxier.serviceChanges.Update(service, nil, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } @@ -690,52 +615,20 @@ func (proxier *Proxier) OnServiceSynced() { proxier.syncProxyRules() } -// is updated by this function (based on the given changes). -// map is cleared after applying them. -func updateServiceMap( - serviceMap proxyServiceMap, - changes *serviceChangeMap) (result updateServiceMapResult) { - result.staleServices = sets.NewString() - - func() { - changes.lock.Lock() - defer changes.lock.Unlock() - for _, change := range changes.items { - existingPorts := serviceMap.merge(change.current) - serviceMap.unmerge(change.previous, existingPorts, result.staleServices) - } - changes.items = make(map[types.NamespacedName]*serviceChange) - }() - - // TODO: If this will appear to be computationally expensive, consider - // computing this incrementally similarly to serviceMap. 
- result.hcServices = make(map[types.NamespacedName]uint16) - for svcPortName, info := range serviceMap { - if info.healthCheckNodePort != 0 { - result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort) - } - } - - return result -} - func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, nil, endpoints) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(nil, endpoints, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(oldEndpoints, endpoints, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, endpoints, nil) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(endpoints, nil, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } @@ -750,152 +643,6 @@ func (proxier *Proxier) OnEndpointsSynced() { proxier.syncProxyRules() } -// is updated by this function (based on the given changes). -// map is cleared after applying them. -func updateEndpointsMap( - endpointsMap proxyEndpointsMap, - changes *endpointsChangeMap, - hostname string) (result updateEndpointMapResult) { - result.staleEndpoints = make(map[endpointServicePair]bool) - result.staleServiceNames = make(map[proxy.ServicePortName]bool) - - func() { - changes.lock.Lock() - defer changes.lock.Unlock() - for _, change := range changes.items { - endpointsMap.unmerge(change.previous) - endpointsMap.merge(change.current) - detectStaleConnections(change.previous, change.current, result.staleEndpoints, result.staleServiceNames) - } - changes.items = make(map[types.NamespacedName]*endpointsChange) - }() - - // TODO: If this will appear to be computationally expensive, consider - // computing this incrementally similarly to endpointsMap. - result.hcEndpoints = make(map[types.NamespacedName]int) - localIPs := getLocalIPs(endpointsMap) - for nsn, ips := range localIPs { - result.hcEndpoints[nsn] = len(ips) - } - - return result -} - -// and are modified by this function with detected stale connections. -func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) { - for svcPortName, epList := range oldEndpointsMap { - for _, ep := range epList { - stale := true - for i := range newEndpointsMap[svcPortName] { - if *newEndpointsMap[svcPortName][i] == *ep { - stale = false - break - } - } - if stale { - glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.endpoint) - staleEndpoints[endpointServicePair{endpoint: ep.endpoint, servicePortName: svcPortName}] = true - } - } - } - - for svcPortName, epList := range newEndpointsMap { - // For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service. 
- if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 { - staleServiceNames[svcPortName] = true - } - } -} - -func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String { - localIPs := make(map[types.NamespacedName]sets.String) - for svcPortName := range endpointsMap { - for _, ep := range endpointsMap[svcPortName] { - if ep.isLocal { - // If the endpoint has a bad format, utilproxy.IPPart() will log an - // error and ep.IPPart() will return a null string. - if ip := ep.IPPart(); ip != "" { - nsn := svcPortName.NamespacedName - if localIPs[nsn] == nil { - localIPs[nsn] = sets.NewString() - } - localIPs[nsn].Insert(ip) - } - } - } - } - return localIPs -} - -// Translates single Endpoints object to proxyEndpointsMap. -// This function is used for incremental updated of endpointsMap. -// -// NOTE: endpoints object should NOT be modified. -func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap { - if endpoints == nil { - return nil - } - - endpointsMap := make(proxyEndpointsMap) - // We need to build a map of portname -> all ip:ports for that - // portname. Explode Endpoints.Subsets[*] into this structure. - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for i := range ss.Ports { - port := &ss.Ports[i] - if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) - continue - } - svcPortName := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, - Port: port.Name, - } - for i := range ss.Addresses { - addr := &ss.Addresses[i] - if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) - continue - } - epInfo := &endpointsInfo{ - endpoint: net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))), - isLocal: addr.NodeName != nil && *addr.NodeName == hostname, - } - endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo) - } - if glog.V(3) { - newEPList := []string{} - for _, ep := range endpointsMap[svcPortName] { - newEPList = append(newEPList, ep.endpoint) - } - glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) - } - } - } - return endpointsMap -} - -// Translates single Service object to proxyServiceMap. -// -// NOTE: service object should NOT be modified. -func serviceToServiceMap(service *api.Service) proxyServiceMap { - if service == nil { - return nil - } - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if utilproxy.ShouldSkipService(svcName, service) { - return nil - } - - serviceMap := make(proxyServiceMap) - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - serviceMap[svcPortName] = newServiceInfo(svcPortName, servicePort, service) - } - return serviceMap -} - // portProtoHash takes the ServicePortName and protocol for a service // returns the associated 16 character hash. This is computed by hashing (sha256) // then encoding to base32 and truncating to 16 chars. 
We do this because IPTables @@ -936,25 +683,17 @@ func servicePortEndpointChainName(servicePortName string, protocol string, endpo return utiliptables.Chain("KUBE-SEP-" + encoded[:16]) } -type endpointServicePair struct { - endpoint string - servicePortName proxy.ServicePortName -} - -func (esp *endpointServicePair) IPPart() string { - return utilproxy.IPPart(esp.endpoint) -} - // After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we // risk sending more traffic to it, all of which will be lost (because UDP). // This assumes the proxier mutex is held -func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) { - for epSvcPair := range connectionMap { - if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP { - endpointIP := utilproxy.IPPart(epSvcPair.endpoint) - err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP) +// TODO: move it to util +func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) { + for _, epSvcPair := range connectionMap { + if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.Protocol() == api.ProtocolUDP { + endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) + err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.ClusterIP(), endpointIP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err) + glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -981,17 +720,15 @@ func (proxier *Proxier) syncProxyRules() { // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. - serviceUpdateResult := updateServiceMap( - proxier.serviceMap, &proxier.serviceChanges) - endpointUpdateResult := updateEndpointsMap( - proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname) + serviceUpdateResult := proxy.UpdateServiceMap(proxier.serviceMap, proxier.serviceChanges) + endpointUpdateResult := proxy.UpdateEndpointsMap(proxier.endpointsMap, proxier.endpointsChanges) - staleServices := serviceUpdateResult.staleServices + staleServices := serviceUpdateResult.UDPStaleClusterIP // merge stale services gathered from updateEndpointsMap - for svcPortName := range endpointUpdateResult.staleServiceNames { - if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) - staleServices.Insert(svcInfo.clusterIP.String()) + for _, svcPortName := range endpointUpdateResult.StaleServiceNames { + if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.Protocol() == api.ProtocolUDP { + glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIP()) + staleServices.Insert(svcInfo.ClusterIP()) } } @@ -1164,7 +901,12 @@ func (proxier *Proxier) syncProxyRules() { // Build rules for each service. 
var svcNameString string - for svcName, svcInfo := range proxier.serviceMap { + for svcName, svc := range proxier.serviceMap { + svcInfo, ok := svc.(*serviceInfo) + if !ok { + glog.Errorf("Failed to cast serviceInfo %q", svcName.String()) + continue + } isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP) protocol := strings.ToLower(string(svcInfo.protocol)) svcNameString = svcInfo.serviceNameString @@ -1224,7 +966,7 @@ func (proxier *Proxier) syncProxyRules() { lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, IP: externalIP, - Port: svcInfo.port, + Port: svcInfo.Port(), Protocol: protocol, } if proxier.portsMap[lp] != nil { @@ -1448,8 +1190,13 @@ func (proxier *Proxier) syncProxyRules() { endpointChains = endpointChains[:0] var endpointChain utiliptables.Chain for _, ep := range proxier.endpointsMap[svcName] { - endpoints = append(endpoints, ep) - endpointChain = ep.endpointChain(svcNameString, protocol) + epInfo, ok := ep.(*endpointsInfo) + if !ok { + glog.Errorf("Failed to cast endpointsInfo %q", ep.String()) + continue + } + endpoints = append(endpoints, epInfo) + endpointChain = epInfo.endpointChain(svcNameString, protocol) endpointChains = append(endpointChains, endpointChain) // Create the endpoint chain, retaining counters if possible. @@ -1476,7 +1223,7 @@ func (proxier *Proxier) syncProxyRules() { // Now write loadbalancing & DNAT rules. n := len(endpointChains) for i, endpointChain := range endpointChains { - epIP := endpoints[i].IPPart() + epIP := endpoints[i].IP() if epIP == "" { // Error parsing this endpoint has been logged. Skip to next endpoint. continue @@ -1687,10 +1434,10 @@ func (proxier *Proxier) syncProxyRules() { // Update healthchecks. The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. - if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { + if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { glog.Errorf("Error syncing healtcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { + if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { glog.Errorf("Error syncing healthcheck endoints: %v", err) } @@ -1701,7 +1448,7 @@ func (proxier *Proxier) syncProxyRules() { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } - proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints) + proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints) } // Join all words with spaces, terminate with newline and write to buf. 
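The net effect of the hunks above is that the iptables backend now stores only the abstract proxy.ServicePort and proxy.Endpoint in the shared maps, and downcasts back to its private serviceInfo/endpointsInfo before touching iptables-specific state. A minimal, self-contained sketch of that downcast pattern, using simplified stand-in types rather than the real k8s.io/kubernetes packages:

package main

import "fmt"

// ServicePort is a stand-in for the abstract interface: only the fields
// every proxy backend needs are exposed through methods.
type ServicePort interface {
	ClusterIP() string
	Protocol() string
}

// serviceInfo is a backend-private implementation carrying extra state
// (here, an iptables chain name) that the interface does not expose.
type serviceInfo struct {
	clusterIP string
	protocol  string
	chainName string
}

func (s *serviceInfo) ClusterIP() string { return s.clusterIP }
func (s *serviceInfo) Protocol() string  { return s.protocol }

func main() {
	serviceMap := map[string]ServicePort{
		"ns1/svc1:p80": &serviceInfo{clusterIP: "10.0.0.1", protocol: "TCP", chainName: "KUBE-SVC-ABCD"},
	}
	for name, svc := range serviceMap {
		// Shared code sees only the interface...
		fmt.Println(name, svc.ClusterIP(), svc.Protocol())
		// ...while backend code downcasts, logging and skipping anything it
		// does not own, just as syncProxyRules does above.
		info, ok := svc.(*serviceInfo)
		if !ok {
			fmt.Printf("failed to cast serviceInfo %q\n", name)
			continue
		}
		fmt.Println("chain:", info.chainName)
	}
}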
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index 6308d4cc0f6..441a79f0198 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -18,22 +18,19 @@ package iptables import ( "bytes" + "fmt" + "net" "reflect" "strconv" + "strings" "testing" "time" - "github.com/davecgh/go-spew/spew" "github.com/golang/glog" - "fmt" - "net" - "strings" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" utilproxy "k8s.io/kubernetes/pkg/proxy/util" @@ -200,8 +197,8 @@ func TestDeleteEndpointConnections(t *testing.T) { svcIP string svcPort int protocol api.Protocol - endpoint string // IP:port endpoint - epSvcPair endpointServicePair // Will be generated by test + endpoint string // IP:port endpoint + epSvcPair proxy.ServiceEndpoint // Will be generated by test simulatedErr string }{ { @@ -253,16 +250,16 @@ func TestDeleteEndpointConnections(t *testing.T) { // Create a service map that has service info entries for all test cases // and generate an endpoint service pair for each test case - serviceMap := make(map[proxy.ServicePortName]*serviceInfo) + serviceMap := make(map[proxy.ServicePortName]proxy.ServicePort) for i, tc := range testCases { svc := proxy.ServicePortName{ NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName}, Port: "p80", } serviceMap[svc] = newFakeServiceInfo(svc, net.ParseIP(tc.svcIP), 80, tc.protocol, false) - testCases[i].epSvcPair = endpointServicePair{ - endpoint: tc.endpoint, - servicePortName: svc, + testCases[i].epSvcPair = proxy.ServiceEndpoint{ + Endpoint: tc.endpoint, + ServicePortName: svc, } } @@ -298,7 +295,7 @@ func TestDeleteEndpointConnections(t *testing.T) { priorExecs := fexec.CommandCalls priorGlogErrs := glog.Stats.Error.Lines() - input := map[endpointServicePair]bool{tc.epSvcPair: true} + input := []proxy.ServiceEndpoint{tc.epSvcPair} fakeProxier.deleteEndpointConnections(input) // For UDP connections, check the executed conntrack command @@ -391,10 +388,10 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { // invocation into a Run() method. 
p := &Proxier{ exec: &fakeexec.FakeExec{}, - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(testHostname), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(testHostname), iptables: ipt, clusterCIDR: "10.0.0.0/24", hostname: testHostname, @@ -720,7 +717,6 @@ func TestLoadBalancer(t *testing.T) { proto := strings.ToLower(string(api.ProtocolTCP)) fwChain := string(serviceFirewallChainName(svcPortName.String(), proto)) svcChain := string(servicePortChainName(svcPortName.String(), proto)) - //lbChain := string(serviceLBChainName(svcPortName.String(), proto)) kubeSvcRules := ipt.GetRules(string(kubeServicesChain)) if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) { @@ -1111,24 +1107,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { for i := range services { fp.OnServiceAdd(services[i]) } - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 8 { t.Errorf("expected service map length 8, got %v", fp.serviceMap) } // The only-local-loadbalancer ones get added - if len(result.hcServices) != 1 { - t.Errorf("expected 1 healthcheck port, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts) } else { nsn := makeNSN("somewhere", "only-local-load-balancer") - if port, found := result.hcServices[nsn]; !found || port != 345 { - t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.hcServices) + if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 { + t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts) } } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } // Remove some stuff @@ -1144,24 +1140,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { fp.OnServiceDelete(services[2]) fp.OnServiceDelete(services[3]) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 1 { t.Errorf("expected service map length 1, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected 0 healthcheck ports, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts) } // All services but one were deleted. While you'd expect only the ClusterIPs // from the three deleted services here, we still have the ClusterIP for // the not-deleted service, because one of it's ServicePorts was deleted. 
expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"} - if len(result.staleServices) != len(expectedStaleUDPServices) { - t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.staleServices.UnsortedList()) + if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) { + t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList()) } for _, ip := range expectedStaleUDPServices { - if !result.staleServices.Has(ip) { + if !result.UDPStaleClusterIP.Has(ip) { t.Errorf("expected stale UDP service service %s", ip) } } @@ -1184,18 +1180,18 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) { ) // Headless service should be ignored - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %d", len(fp.serviceMap)) } // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %d", len(result.hcServices)) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts)) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } } @@ -1212,16 +1208,16 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { }), ) - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %v", fp.serviceMap) } // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP) } } @@ -1252,328 +1248,57 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { fp.OnServiceAdd(servicev1) - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } // Change service to load-balancer fp.OnServiceUpdate(servicev1, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if 
len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.UnsortedList()) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList()) } // No change; make sure the service map stays the same and there are // no health-check changes fp.OnServiceUpdate(servicev2, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.UnsortedList()) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList()) } // And back to ClusterIP fp.OnServiceUpdate(servicev2, servicev1) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) - } -} - -func Test_getLocalIPs(t *testing.T) { - testCases := []struct { - endpointsMap map[proxy.ServicePortName][]*endpointsInfo - expected map[types.NamespacedName]sets.String - }{{ - // Case[0]: nothing - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{}, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[1]: unnamed port - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[2]: unnamed port local - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.1"), - }, - }, { - // Case[3]: named local and non-local ports for the same IP. 
- endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.2"), - }, - }, { - // Case[4]: named local and non-local ports for different IPs. - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {endpoint: "2.2.2.2:22", isLocal: true}, - {endpoint: "2.2.2.22:22", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p23"): { - {endpoint: "2.2.2.3:23", isLocal: true}, - }, - makeServicePortName("ns4", "ep4", "p44"): { - {endpoint: "4.4.4.4:44", isLocal: true}, - {endpoint: "4.4.4.5:44", isLocal: false}, - }, - makeServicePortName("ns4", "ep4", "p45"): { - {endpoint: "4.4.4.6:45", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns2", Name: "ep2"}: sets.NewString("2.2.2.2", "2.2.2.22", "2.2.2.3"), - {Namespace: "ns4", Name: "ep4"}: sets.NewString("4.4.4.4", "4.4.4.6"), - }, - }, { - // Case[5]: named port local and bad endpoints IP - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "bad ip:11", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{}, - }} - - for tci, tc := range testCases { - // outputs - localIPs := getLocalIPs(tc.endpointsMap) - - if !reflect.DeepEqual(localIPs, tc.expected) { - t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs) - } - } -} - -// This is a coarse test, but it offers some modicum of confidence as the code is evolved. 
-func Test_endpointsToEndpointsMap(t *testing.T) { - testCases := []struct { - newEndpoints *api.Endpoints - expected map[proxy.ServicePortName][]*endpointsInfo - }{{ - // Case[0]: nothing - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[1]: no changes, unnamed port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[2]: no changes, named port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "port", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "port"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[3]: new port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[4]: remove port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[5]: new IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "2.2.2.2", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }, { - Name: "p2", - Port: 22, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "2.2.2.2:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p2"): { - {endpoint: "1.1.1.1:22", isLocal: false}, - {endpoint: "2.2.2.2:22", isLocal: false}, - }, - }, - }, { - // Case[6]: remove IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[7]: rename port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p2", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p2"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[8]: renumber port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - 
Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 22, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:22", isLocal: false}, - }, - }, - }} - - for tci, tc := range testCases { - // outputs - newEndpoints := endpointsToEndpointsMap(tc.newEndpoints, "host") - - if len(newEndpoints) != len(tc.expected) { - t.Errorf("[%d] expected %d new, got %d: %v", tci, len(tc.expected), len(newEndpoints), spew.Sdump(newEndpoints)) - } - for x := range tc.expected { - if len(newEndpoints[x]) != len(tc.expected[x]) { - t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(tc.expected[x]), x, len(newEndpoints[x])) - } else { - for i := range newEndpoints[x] { - if *(newEndpoints[x][i]) != *(tc.expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, tc.expected[x][i], *(newEndpoints[x][i])) - } - } - } - } + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } } @@ -1619,7 +1344,7 @@ func makeServiceMap(proxier *Proxier, allServices ...*api.Service) { proxier.servicesSynced = true } -func compareEndpointsMaps(t *testing.T, tci int, newMap, expected map[proxy.ServicePortName][]*endpointsInfo) { +func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) { if len(newMap) != len(expected) { t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) } @@ -1628,8 +1353,13 @@ func compareEndpointsMaps(t *testing.T, tci int, newMap, expected map[proxy.Serv t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) } else { for i := range expected[x] { - if *(newMap[x][i]) != *(expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newMap[x][i]) + newEp, ok := newMap[x][i].(*endpointsInfo) + if !ok { + t.Errorf("Failed to cast endpointsInfo") + continue + } + if *newEp != *(expected[x][i]) { + t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp) } } } @@ -1950,14 +1680,14 @@ func Test_updateEndpointsMap(t *testing.T) { currentEndpoints []*api.Endpoints oldEndpoints map[proxy.ServicePortName][]*endpointsInfo expectedResult map[proxy.ServicePortName][]*endpointsInfo - expectedStaleEndpoints []endpointServicePair + expectedStaleEndpoints []proxy.ServiceEndpoint expectedStaleServiceNames map[proxy.ServicePortName]bool expectedHealthchecks map[types.NamespacedName]int }{{ // Case[0]: nothing oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -1978,7 +1708,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -1999,7 +1729,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, 
expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, @@ -2028,7 +1758,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -2061,7 +1791,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.3:13", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, @@ -2128,7 +1858,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "2.2.2.2:22", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 2, @@ -2148,7 +1878,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", ""): true, }, @@ -2169,9 +1899,9 @@ func Test_updateEndpointsMap(t *testing.T) { }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", ""), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", ""), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -2198,7 +1928,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p12"): true, }, @@ -2228,15 +1958,15 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.2:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }, { - endpoint: "1.1.1.1:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + Endpoint: "1.1.1.1:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }, { - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -2261,7 +1991,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", 
"p12"): true, }, @@ -2289,9 +2019,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -2313,9 +2043,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p11-2"): true, @@ -2339,9 +2069,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:22", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -2396,21 +2126,21 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "4.4.4.4:44", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "2.2.2.2:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "2.2.2.2:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), }, { - endpoint: "2.2.2.22:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), + Endpoint: "2.2.2.22:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), }, { - endpoint: "2.2.2.3:23", - servicePortName: makeServicePortName("ns2", "ep2", "p23"), + Endpoint: "2.2.2.3:23", + ServicePortName: makeServicePortName("ns2", "ep2", "p23"), }, { - endpoint: "4.4.4.5:44", - servicePortName: makeServicePortName("ns4", "ep4", "p44"), + Endpoint: "4.4.4.5:44", + ServicePortName: makeServicePortName("ns4", "ep4", "p44"), }, { - endpoint: "4.4.4.6:45", - servicePortName: makeServicePortName("ns4", "ep4", "p45"), + Endpoint: "4.4.4.6:45", + ServicePortName: makeServicePortName("ns4", "ep4", "p45"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p12"): true, @@ -2434,7 +2164,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", ""): true, }, @@ -2454,7 +2184,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsAdd(tc.previousEndpoints[i]) } } - updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) + proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. 
@@ -2474,27 +2204,40 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsUpdate(prev, curr) } } - result := updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) + result := proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) newMap := fp.endpointsMap compareEndpointsMaps(t, tci, newMap, tc.expectedResult) - if len(result.staleEndpoints) != len(tc.expectedStaleEndpoints) { - t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.staleEndpoints), result.staleEndpoints) + if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { + t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints) } for _, x := range tc.expectedStaleEndpoints { - if result.staleEndpoints[x] != true { - t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.staleEndpoints) + found := false + for _, stale := range result.StaleEndpoints { + if stale == x { + found = true + break + } + } + if !found { + t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints) } } - if len(result.staleServiceNames) != len(tc.expectedStaleServiceNames) { - t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.staleServiceNames), result.staleServiceNames) + if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) { + t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames) } for svcName := range tc.expectedStaleServiceNames { - if result.staleServiceNames[svcName] != true { - t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.staleServiceNames) + found := false + for _, stale := range result.StaleServiceNames { + if stale == svcName { + found = true + } + } + if !found { + t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames) } } - if !reflect.DeepEqual(result.hcEndpoints, tc.expectedHealthchecks) { - t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.hcEndpoints) + if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) { + t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize) } } } From 8dd4cbe88b1b60dfdc8c051a11617b6f9dc77cfb Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sat, 3 Feb 2018 17:51:17 +0800 Subject: [PATCH 18/53] ipvs part changes --- pkg/proxy/ipvs/proxier.go | 445 +++++++---------------------- pkg/proxy/ipvs/proxier_test.go | 498 ++++++++------------------------- 2 files changed, 222 insertions(+), 721 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index e86d1019a79..e567f915175 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -24,7 +24,6 @@ import ( "bytes" "fmt" "net" - "reflect" "strconv" "strings" "sync" @@ -109,12 +108,12 @@ type Proxier struct { // services that happened since last syncProxyRules call. For a single object, // changes are accumulated, i.e. previous is state from before all of them, // current is state after applying all of those. 
- endpointsChanges endpointsChangeMap - serviceChanges serviceChangeMap + endpointsChanges *proxy.EndpointChangeTracker + serviceChanges *proxy.ServiceChangeTracker mu sync.Mutex // protects the following fields - serviceMap proxyServiceMap - endpointsMap proxyEndpointsMap + serviceMap proxy.ServiceMap + endpointsMap proxy.EndpointsMap portsMap map[utilproxy.LocalPort]utilproxy.Closeable // endpointsSynced and servicesSynced are set to true when corresponding // objects are synced after startup. This is used to avoid updating ipvs rules @@ -302,10 +301,10 @@ func NewProxier(ipt utiliptables.Interface, proxier := &Proxier{ portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(hostname), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(hostname), syncPeriod: syncPeriod, minSyncPeriod: minSyncPeriod, iptables: ipt, @@ -343,8 +342,6 @@ func NewProxier(ipt utiliptables.Interface, return proxier, nil } -type proxyServiceMap map[proxy.ServicePortName]*serviceInfo - // internal struct for string service information type serviceInfo struct { clusterIP net.IP @@ -362,37 +359,8 @@ type serviceInfo struct { serviceNameString string } -// is updated by this function (based on the given changes). -// map is cleared after applying them. -func updateServiceMap( - serviceMap proxyServiceMap, - changes *serviceChangeMap) (result updateServiceMapResult) { - result.staleServices = sets.NewString() - - func() { - changes.lock.Lock() - defer changes.lock.Unlock() - for _, change := range changes.items { - existingPorts := serviceMap.merge(change.current) - serviceMap.unmerge(change.previous, existingPorts, result.staleServices) - } - changes.items = make(map[types.NamespacedName]*serviceChange) - }() - - // TODO: If this will appear to be computationally expensive, consider - // computing this incrementally similarly to serviceMap. 
- result.hcServices = make(map[types.NamespacedName]uint16) - for svcPortName, info := range serviceMap { - if info.healthCheckNodePort != 0 { - result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort) - } - } - - return result -} - -// returns a new serviceInfo struct -func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { +// returns a new proxy.ServicePort which abstracts a serviceInfo +func newServiceInfo(port *api.ServicePort, service *api.Service) proxy.ServicePort { onlyNodeLocalEndpoints := false if apiservice.RequestsOnlyLocalTraffic(service) { onlyNodeLocalEndpoints = true @@ -418,10 +386,13 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges) copy(info.externalIPs, service.Spec.ExternalIPs) + svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} + svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: port.Name} + if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) + glog.Errorf("Service %q has no healthcheck nodeport", svcName.String()) } else { info.healthCheckNodePort = int(p) } @@ -433,103 +404,32 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se return info } -func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String { - existingPorts := sets.NewString() - for svcPortName, info := range other { - existingPorts.Insert(svcPortName.Port) - _, exists := (*sm)[svcPortName] - if !exists { - glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) - } else { - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) - } - (*sm)[svcPortName] = info - } - return existingPorts +// ClusterIP is part of ServicePort interface. +func (info *serviceInfo) ClusterIP() string { + return info.clusterIP.String() } -func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String) { - for svcPortName := range other { - if existingPorts.Has(svcPortName.Port) { - continue - } - info, exists := (*sm)[svcPortName] - if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) - if info.protocol == api.ProtocolUDP { - staleServices.Insert(info.clusterIP.String()) - } - delete(*sm, svcPortName) - } else { - glog.Errorf("Service port %q removed, but doesn't exists", svcPortName) - } - } +// Port is part of ServicePort interface. +func (info *serviceInfo) Port() int { + return info.port } -type serviceChangeMap struct { - lock sync.Mutex - items map[types.NamespacedName]*serviceChange +// Protocol is part of ServicePort interface. +func (info *serviceInfo) Protocol() api.Protocol { + return info.protocol } -type serviceChange struct { - previous proxyServiceMap - current proxyServiceMap +// String is part of ServicePort interface. +func (info *serviceInfo) String() string { + return fmt.Sprintf("%s:%d/%s", info.clusterIP, info.port, info.protocol) } -type updateEndpointMapResult struct { - hcEndpoints map[types.NamespacedName]int - staleEndpoints map[endpointServicePair]bool - staleServiceNames map[proxy.ServicePortName]bool +// HealthCheckNodePort is part of ServicePort interface. 
+func (info *serviceInfo) HealthCheckNodePort() int {
+	return info.healthCheckNodePort
 }
 
-type updateServiceMapResult struct {
-	hcServices map[types.NamespacedName]uint16
-	staleServices sets.String
-}
-
-func newServiceChangeMap() serviceChangeMap {
-	return serviceChangeMap{
-		items: make(map[types.NamespacedName]*serviceChange),
-	}
-}
-
-func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool {
-	scm.lock.Lock()
-	defer scm.lock.Unlock()
-
-	change, exists := scm.items[*namespacedName]
-	if !exists {
-		change = &serviceChange{}
-		change.previous = serviceToServiceMap(previous)
-		scm.items[*namespacedName] = change
-	}
-	change.current = serviceToServiceMap(current)
-	if reflect.DeepEqual(change.previous, change.current) {
-		delete(scm.items, *namespacedName)
-	}
-	return len(scm.items) > 0
-}
-
-// Translates single Service object to proxyServiceMap.
-//
-// NOTE: service object should NOT be modified.
-func serviceToServiceMap(service *api.Service) proxyServiceMap {
-	if service == nil {
-		return nil
-	}
-	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
-	if utilproxy.ShouldSkipService(svcName, service) {
-		return nil
-	}
-
-	serviceMap := make(proxyServiceMap)
-	for i := range service.Spec.Ports {
-		servicePort := &service.Spec.Ports[i]
-		svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
-		serviceMap[svcPortName] = newServiceInfo(svcPortName, servicePort, service)
-	}
-	return serviceMap
-}
+var _ proxy.ServicePort = &serviceInfo{}
 
 // internal struct for endpoints information
 type endpointsInfo struct {
@@ -537,12 +437,26 @@ type endpointsInfo struct {
 	isLocal  bool
 }
 
-func (e *endpointsInfo) String() string {
-	return fmt.Sprintf("%v", *e)
+// returns a new proxy.Endpoint which abstracts an endpointsInfo
+func newEndpointsInfo(IP string, port int, isLocal bool) proxy.Endpoint {
+	return &endpointsInfo{
+		endpoint: net.JoinHostPort(IP, strconv.Itoa(port)),
+		isLocal:  isLocal,
+	}
 }
 
-// IPPart returns just the IP part of the endpoint.
-func (e *endpointsInfo) IPPart() string {
+// IsLocal is part of proxy.Endpoint interface.
+func (e *endpointsInfo) IsLocal() bool {
+	return e.isLocal
+}
+
+// String is part of proxy.Endpoint interface.
+func (e *endpointsInfo) String() string {
+	return fmt.Sprintf("%v", e.endpoint)
+}
+
+// IP returns just the IP part of the endpoint; it's part of the proxy.Endpoint interface.
+func (e *endpointsInfo) IP() string {
 	return utilproxy.IPPart(e.endpoint)
 }
 
@@ -551,164 +465,18 @@ func (e *endpointsInfo) PortPart() (int, error) {
 	return utilproxy.PortPart(e.endpoint)
 }
 
-type endpointServicePair struct {
-	endpoint string
-	servicePortName proxy.ServicePortName
+// Equal is part of proxy.Endpoint interface.
+func (e *endpointsInfo) Equal(other proxy.Endpoint) bool {
+	o, ok := other.(*endpointsInfo)
+	if !ok {
+		glog.Errorf("Failed to cast endpointsInfo")
+		return false
+	}
+	return e.endpoint == o.endpoint &&
+		e.isLocal == o.isLocal
 }
 
-type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo
-
-type endpointsChange struct {
-	previous proxyEndpointsMap
-	current proxyEndpointsMap
-}
-
-type endpointsChangeMap struct {
-	lock sync.Mutex
-	hostname string
-	items map[types.NamespacedName]*endpointsChange
-}
-
-// and are modified by this function with detected stale connections.
-func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) { - for svcPortName, epList := range oldEndpointsMap { - for _, ep := range epList { - stale := true - for i := range newEndpointsMap[svcPortName] { - if *newEndpointsMap[svcPortName][i] == *ep { - stale = false - break - } - } - if stale { - glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.endpoint) - staleEndpoints[endpointServicePair{endpoint: ep.endpoint, servicePortName: svcPortName}] = true - } - } - } - - for svcPortName, epList := range newEndpointsMap { - // For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service. - if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 { - staleServiceNames[svcPortName] = true - } - } -} - -// is updated by this function (based on the given changes). -// map is cleared after applying them. -func updateEndpointsMap( - endpointsMap proxyEndpointsMap, - changes *endpointsChangeMap, - hostname string) (result updateEndpointMapResult) { - result.staleEndpoints = make(map[endpointServicePair]bool) - result.staleServiceNames = make(map[proxy.ServicePortName]bool) - - func() { - changes.lock.Lock() - defer changes.lock.Unlock() - for _, change := range changes.items { - endpointsMap.unmerge(change.previous) - endpointsMap.merge(change.current) - detectStaleConnections(change.previous, change.current, result.staleEndpoints, result.staleServiceNames) - } - changes.items = make(map[types.NamespacedName]*endpointsChange) - }() - - // TODO: If this will appear to be computationally expensive, consider - // computing this incrementally similarly to endpointsMap. - result.hcEndpoints = make(map[types.NamespacedName]int) - localIPs := getLocalIPs(endpointsMap) - for nsn, ips := range localIPs { - result.hcEndpoints[nsn] = len(ips) - } - - return result -} - -// Translates single Endpoints object to proxyEndpointsMap. -// This function is used for incremental updated of endpointsMap. -// -// NOTE: endpoints object should NOT be modified. -func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap { - if endpoints == nil { - return nil - } - - endpointsMap := make(proxyEndpointsMap) - // We need to build a map of portname -> all ip:ports for that - // portname. Explode Endpoints.Subsets[*] into this structure. 
- for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for i := range ss.Ports { - port := &ss.Ports[i] - if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) - continue - } - svcPort := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, - Port: port.Name, - } - for i := range ss.Addresses { - addr := &ss.Addresses[i] - if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) - continue - } - epInfo := &endpointsInfo{ - endpoint: net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))), - isLocal: addr.NodeName != nil && *addr.NodeName == hostname, - } - endpointsMap[svcPort] = append(endpointsMap[svcPort], epInfo) - } - if glog.V(3) { - newEPList := []string{} - for _, ep := range endpointsMap[svcPort] { - newEPList = append(newEPList, ep.endpoint) - } - glog.Infof("Setting endpoints for %q to %+v", svcPort, newEPList) - } - } - } - return endpointsMap -} - -func newEndpointsChangeMap(hostname string) endpointsChangeMap { - return endpointsChangeMap{ - hostname: hostname, - items: make(map[types.NamespacedName]*endpointsChange), - } -} - -func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool { - ecm.lock.Lock() - defer ecm.lock.Unlock() - - change, exists := ecm.items[*namespacedName] - if !exists { - change = &endpointsChange{} - change.previous = endpointsToEndpointsMap(previous, ecm.hostname) - ecm.items[*namespacedName] = change - } - change.current = endpointsToEndpointsMap(current, ecm.hostname) - if reflect.DeepEqual(change.previous, change.current) { - delete(ecm.items, *namespacedName) - } - return len(ecm.items) > 0 -} - -func (em proxyEndpointsMap) merge(other proxyEndpointsMap) { - for svcPort := range other { - em[svcPort] = other[svcPort] - } -} - -func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) { - for svcPort := range other { - delete(em, svcPort) - } -} +var _ proxy.Endpoint = &endpointsInfo{} // KernelHandler can handle the current installed kernel modules. type KernelHandler interface { @@ -891,24 +659,21 @@ func (proxier *Proxier) isInitialized() bool { // OnServiceAdd is called whenever creation of new service object is observed. func (proxier *Proxier) OnServiceAdd(service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() { + if proxier.serviceChanges.Update(nil, service, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } // OnServiceUpdate is called whenever modification of an existing service object is observed. func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() { + if proxier.serviceChanges.Update(oldService, service, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } // OnServiceDelete is called whenever deletion of an existing service object is observed. 
func (proxier *Proxier) OnServiceDelete(service *api.Service) { - namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if proxier.serviceChanges.update(&namespacedName, service, nil) && proxier.isInitialized() { + if proxier.serviceChanges.Update(service, nil, newServiceInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } @@ -926,24 +691,21 @@ func (proxier *Proxier) OnServiceSynced() { // OnEndpointsAdd is called whenever creation of new endpoints object is observed. func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, nil, endpoints) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(nil, endpoints, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } // OnEndpointsUpdate is called whenever modification of an existing endpoints object is observed. func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(oldEndpoints, endpoints, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } // OnEndpointsDelete is called whenever deletion of an existing endpoints object is observed. func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) { - namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - if proxier.endpointsChanges.update(&namespacedName, endpoints, nil) && proxier.isInitialized() { + if proxier.endpointsChanges.Update(endpoints, nil, newEndpointsInfo) && proxier.isInitialized() { proxier.syncRunner.Run() } } @@ -977,17 +739,15 @@ func (proxier *Proxier) syncProxyRules() { // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. - serviceUpdateResult := updateServiceMap( - proxier.serviceMap, &proxier.serviceChanges) - endpointUpdateResult := updateEndpointsMap( - proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname) + serviceUpdateResult := proxy.UpdateServiceMap(proxier.serviceMap, proxier.serviceChanges) + endpointUpdateResult := proxy.UpdateEndpointsMap(proxier.endpointsMap, proxier.endpointsChanges) - staleServices := serviceUpdateResult.staleServices + staleServices := serviceUpdateResult.UDPStaleClusterIP // merge stale services gathered from updateEndpointsMap - for svcPortName := range endpointUpdateResult.staleServiceNames { - if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) - staleServices.Insert(svcInfo.clusterIP.String()) + for _, svcPortName := range endpointUpdateResult.StaleServiceNames { + if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.Protocol() == api.ProtocolUDP { + glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIP()) + staleServices.Insert(svcInfo.ClusterIP()) } } @@ -1090,15 +850,25 @@ func (proxier *Proxier) syncProxyRules() { } // Build IPVS rules for each service. 
-	for svcName, svcInfo := range proxier.serviceMap {
+	for svcName, svc := range proxier.serviceMap {
+		svcInfo, ok := svc.(*serviceInfo)
+		if !ok {
+			glog.Errorf("Failed to cast serviceInfo %q", svcName.String())
+			continue
+		}
 		protocol := strings.ToLower(string(svcInfo.protocol))
 		// Precompute svcNameString; with many services the many calls
 		// to ServicePortName.String() show up in CPU profiles.
 		svcNameString := svcName.String()
 
 		// Handle traffic that loops back to the originator with SNAT.
-		for _, ep := range proxier.endpointsMap[svcName] {
-			epIP := ep.IPPart()
+		for _, e := range proxier.endpointsMap[svcName] {
+			ep, ok := e.(*endpointsInfo)
+			if !ok {
+				glog.Errorf("Failed to cast endpointsInfo %q", e.String())
+				continue
+			}
+			epIP := ep.IP()
 			epPort, err := ep.PortPart()
 			// Error parsing this endpoint has been logged. Skip to next endpoint.
 			if epIP == "" || err != nil {
@@ -1532,10 +1302,10 @@ func (proxier *Proxier) syncProxyRules() {
 	// Update healthchecks. The endpoints list might include services that are
 	// not "OnlyLocal", but the services list will not, and the healthChecker
 	// will just drop those endpoints.
-	if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil {
+	if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil {
 		glog.Errorf("Error syncing healthcheck services: %v", err)
 	}
-	if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil {
+	if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil {
 		glog.Errorf("Error syncing healthcheck endpoints: %v", err)
 	}
 
@@ -1546,19 +1316,19 @@ func (proxier *Proxier) syncProxyRules() {
 			glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
 		}
 	}
-	proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints)
+	proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints)
 }
 
 // After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
 // risk sending more traffic to it, all of which will be lost (because UDP).
// This assumes the proxier mutex is held -func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) { - for epSvcPair := range connectionMap { - if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP { - endpointIP := utilproxy.IPPart(epSvcPair.endpoint) - err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP) +func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) { + for _, epSvcPair := range connectionMap { + if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.Protocol() == api.ProtocolUDP { + endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) + err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.ClusterIP(), endpointIP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err) + glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -1619,8 +1389,13 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } for _, eps := range proxier.endpointsMap[svcPortName] { - if !onlyNodeLocalEndpoints || onlyNodeLocalEndpoints && eps.isLocal { - newEndpoints.Insert(eps.endpoint) + epInfo, ok := eps.(*endpointsInfo) + if !ok { + glog.Errorf("Failed to cast endpointsInfo") + continue + } + if !onlyNodeLocalEndpoints || onlyNodeLocalEndpoints && epInfo.isLocal { + newEndpoints.Insert(epInfo.endpoint) } } @@ -1759,26 +1534,6 @@ func writeLine(buf *bytes.Buffer, words ...string) { } } -func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String { - localIPs := make(map[types.NamespacedName]sets.String) - for svcPortName := range endpointsMap { - for _, ep := range endpointsMap[svcPortName] { - if ep.isLocal { - // If the endpoint has a bad format, utilproxy.IPPart() will log an - // error and ep.IPPart() will return a null string. - if ip := ep.IPPart(); ip != "" { - nsn := svcPortName.NamespacedName - if localIPs[nsn] == nil { - localIPs[nsn] = sets.NewString() - } - localIPs[nsn].Insert(ip) - } - } - } - } - return localIPs -} - // listenPortOpener opens ports by calling bind() and listen(). 
type listenPortOpener struct{} diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 88f7df51897..fb242566015 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -40,8 +40,6 @@ import ( iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" ipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing" - - "github.com/davecgh/go-spew/spew" ) const testHostname = "test-hostname" @@ -121,10 +119,10 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u } return &Proxier{ exec: fexec, - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(testHostname), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(testHostname), iptables: ipt, ipvs: ipvs, ipset: ipset, @@ -997,24 +995,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { for i := range services { fp.OnServiceAdd(services[i]) } - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 8 { t.Errorf("expected service map length 8, got %v", fp.serviceMap) } // The only-local-loadbalancer ones get added - if len(result.hcServices) != 1 { - t.Errorf("expected 1 healthcheck port, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts) } else { nsn := makeNSN("somewhere", "only-local-load-balancer") - if port, found := result.hcServices[nsn]; !found || port != 345 { - t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.hcServices) + if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 { + t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts) } } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } // Remove some stuff @@ -1030,24 +1028,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) { fp.OnServiceDelete(services[2]) fp.OnServiceDelete(services[3]) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 1 { t.Errorf("expected service map length 1, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected 0 healthcheck ports, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts) } // All services but one were deleted. While you'd expect only the ClusterIPs // from the three deleted services here, we still have the ClusterIP for // the not-deleted service, because one of it's ServicePorts was deleted. 
expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"} - if len(result.staleServices) != len(expectedStaleUDPServices) { - t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.staleServices.List()) + if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) { + t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.List()) } for _, ip := range expectedStaleUDPServices { - if !result.staleServices.Has(ip) { + if !result.UDPStaleClusterIP.Has(ip) { t.Errorf("expected stale UDP service service %s", ip) } } @@ -1072,18 +1070,18 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) { ) // Headless service should be ignored - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %d", len(fp.serviceMap)) } // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %d", len(result.hcServices)) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts)) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } } @@ -1102,16 +1100,16 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { }), ) - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 0 { t.Errorf("expected service map length 0, got %v", fp.serviceMap) } // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP) } } @@ -1144,57 +1142,57 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { fp.OnServiceAdd(servicev1) - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result := proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } // Change service to load-balancer fp.OnServiceUpdate(servicev1, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { 
t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.List()) } // No change; make sure the service map stays the same and there are // no health-check changes fp.OnServiceUpdate(servicev2, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 1 { + t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) + if len(result.UDPStaleClusterIP) != 0 { + t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.List()) } // And back to ClusterIP fp.OnServiceUpdate(servicev2, servicev1) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) + result = proxy.UpdateServiceMap(fp.serviceMap, fp.serviceChanges) if len(fp.serviceMap) != 2 { t.Errorf("expected service map length 2, got %v", fp.serviceMap) } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) + if len(result.HCServiceNodePorts) != 0 { + t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts) } - if len(result.staleServices) != 0 { + if len(result.UDPStaleClusterIP) != 0 { // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) + t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP)) } } @@ -1570,14 +1568,14 @@ func Test_updateEndpointsMap(t *testing.T) { currentEndpoints []*api.Endpoints oldEndpoints map[proxy.ServicePortName][]*endpointsInfo expectedResult map[proxy.ServicePortName][]*endpointsInfo - expectedStaleEndpoints []endpointServicePair + expectedStaleEndpoints []proxy.ServiceEndpoint expectedStaleServiceNames map[proxy.ServicePortName]bool expectedHealthchecks map[types.NamespacedName]int }{{ // Case[0]: nothing oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -1598,7 +1596,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -1619,7 +1617,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStaleEndpoints: 
[]endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, @@ -1648,7 +1646,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, }, { @@ -1681,7 +1679,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.3:13", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 1, @@ -1748,7 +1746,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "2.2.2.2:22", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{ makeNSN("ns1", "ep1"): 2, @@ -1768,7 +1766,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", ""): true, }, @@ -1789,9 +1787,9 @@ func Test_updateEndpointsMap(t *testing.T) { }, }, expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", ""), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", ""), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -1818,7 +1816,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p12"): true, }, @@ -1848,15 +1846,15 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.2:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }, { - endpoint: "1.1.1.1:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + Endpoint: "1.1.1.1:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }, { - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -1881,7 +1879,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, 
expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p12"): true, }, @@ -1909,9 +1907,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.2:12", + ServicePortName: makeServicePortName("ns1", "ep1", "p12"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -1933,9 +1931,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p11-2"): true, @@ -1959,9 +1957,9 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:22", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "1.1.1.1:11", + ServicePortName: makeServicePortName("ns1", "ep1", "p11"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, expectedHealthchecks: map[types.NamespacedName]int{}, @@ -2016,21 +2014,21 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "4.4.4.4:44", isLocal: true}, }, }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "2.2.2.2:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), + expectedStaleEndpoints: []proxy.ServiceEndpoint{{ + Endpoint: "2.2.2.2:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), }, { - endpoint: "2.2.2.22:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), + Endpoint: "2.2.2.22:22", + ServicePortName: makeServicePortName("ns2", "ep2", "p22"), }, { - endpoint: "2.2.2.3:23", - servicePortName: makeServicePortName("ns2", "ep2", "p23"), + Endpoint: "2.2.2.3:23", + ServicePortName: makeServicePortName("ns2", "ep2", "p23"), }, { - endpoint: "4.4.4.5:44", - servicePortName: makeServicePortName("ns4", "ep4", "p44"), + Endpoint: "4.4.4.5:44", + ServicePortName: makeServicePortName("ns4", "ep4", "p44"), }, { - endpoint: "4.4.4.6:45", - servicePortName: makeServicePortName("ns4", "ep4", "p45"), + Endpoint: "4.4.4.6:45", + ServicePortName: makeServicePortName("ns4", "ep4", "p45"), }}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", "p12"): true, @@ -2054,7 +2052,7 @@ func Test_updateEndpointsMap(t *testing.T) { {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedStaleEndpoints: []endpointServicePair{}, + expectedStaleEndpoints: []proxy.ServiceEndpoint{}, expectedStaleServiceNames: map[proxy.ServicePortName]bool{ makeServicePortName("ns1", "ep1", ""): true, }, @@ -2076,7 +2074,7 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsAdd(tc.previousEndpoints[i]) } } - updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) + proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we 
want to be. @@ -2096,313 +2094,61 @@ func Test_updateEndpointsMap(t *testing.T) { fp.OnEndpointsUpdate(prev, curr) } } - result := updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) + result := proxy.UpdateEndpointsMap(fp.endpointsMap, fp.endpointsChanges) newMap := fp.endpointsMap compareEndpointsMaps(t, tci, newMap, tc.expectedResult) - if len(result.staleEndpoints) != len(tc.expectedStaleEndpoints) { - t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.staleEndpoints), result.staleEndpoints) + if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) { + t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints) } for _, x := range tc.expectedStaleEndpoints { - if result.staleEndpoints[x] != true { - t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.staleEndpoints) + found := false + for _, stale := range result.StaleEndpoints { + if stale == x { + found = true + break + } + } + if !found { + t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints) } } - if len(result.staleServiceNames) != len(tc.expectedStaleServiceNames) { - t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.staleServiceNames), result.staleServiceNames) + if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) { + t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames) } for svcName := range tc.expectedStaleServiceNames { - if result.staleServiceNames[svcName] != true { - t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.staleServiceNames) + found := false + for _, stale := range result.StaleServiceNames { + if stale == svcName { + found = true + break + } + } + if !found { + t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames) } } - if !reflect.DeepEqual(result.hcEndpoints, tc.expectedHealthchecks) { - t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.hcEndpoints) + if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) { + t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize) } } } -func compareEndpointsMaps(t *testing.T, tci int, newMap, expected map[proxy.ServicePortName][]*endpointsInfo) { +func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) { if len(newMap) != len(expected) { t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) } for x := range expected { if len(newMap[x]) != len(expected[x]) { - t.Errorf("[%d] expected %d Endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) + t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) } else { for i := range expected[x] { - if *(newMap[x][i]) != *(expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newMap[x][i]) + newEp, ok := newMap[x][i].(*endpointsInfo) + if !ok { + t.Errorf("Failed to cast endpointsInfo") + continue } - } - } - } -} - -func Test_getLocalIPs(t *testing.T) { - testCases := []struct { - endpointsMap 
map[proxy.ServicePortName][]*endpointsInfo - expected map[types.NamespacedName]sets.String - }{{ - // Case[0]: nothing - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{}, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[1]: unnamed port - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", false}, - }, - }, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[2]: unnamed port local - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.1"), - }, - }, { - // Case[3]: named local and non-local ports for the same IP. - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, - {"1.1.1.2:11", true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {"1.1.1.1:12", false}, - {"1.1.1.2:12", true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.2"), - }, - }, { - // Case[4]: named local and non-local ports for different IPs. - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {"1.1.1.1:11", false}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {"2.2.2.2:22", true}, - {"2.2.2.22:22", true}, - }, - makeServicePortName("ns2", "ep2", "p23"): { - {"2.2.2.3:23", true}, - }, - makeServicePortName("ns4", "ep4", "p44"): { - {"4.4.4.4:44", true}, - {"4.4.4.5:44", false}, - }, - makeServicePortName("ns4", "ep4", "p45"): { - {"4.4.4.6:45", true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns2", Name: "ep2"}: sets.NewString("2.2.2.2", "2.2.2.22", "2.2.2.3"), - {Namespace: "ns4", Name: "ep4"}: sets.NewString("4.4.4.4", "4.4.4.6"), - }, - }, { - // Case[5]: named port local and bad endpoints IP - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "bad ip:11", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{}, - }} - - for tci, tc := range testCases { - // outputs - localIPs := getLocalIPs(tc.endpointsMap) - - if !reflect.DeepEqual(localIPs, tc.expected) { - t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs) - } - } -} - -// This is a coarse test, but it offers some modicum of confidence as the code is evolved. 
-func Test_endpointsToEndpointsMap(t *testing.T) { - testCases := []struct { - newEndpoints *api.Endpoints - expected map[proxy.ServicePortName][]*endpointsInfo - }{{ - // Case[0]: nothing - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[1]: no changes, unnamed port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", false}, - }, - }, - }, { - // Case[2]: no changes, named port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "port", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "port"): { - {"1.1.1.1:11", false}, - }, - }, - }, { - // Case[3]: new port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {"1.1.1.1:11", false}, - }, - }, - }, { - // Case[4]: remove port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[5]: new IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "2.2.2.2", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }, { - Name: "p2", - Port: 22, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {"1.1.1.1:11", false}, - {"2.2.2.2:11", false}, - }, - makeServicePortName("ns1", "ep1", "p2"): { - {"1.1.1.1:22", false}, - {"2.2.2.2:22", false}, - }, - }, - }, { - // Case[6]: remove IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {"1.1.1.1:11", false}, - }, - }, - }, { - // Case[7]: rename port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p2", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p2"): { - {"1.1.1.1:11", false}, - }, - }, - }, { - // Case[8]: renumber port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 22, - }}, - }, - } - }), - expected: 
map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {"1.1.1.1:22", false}, - }, - }, - }} - - for tci, tc := range testCases { - // outputs - newEndpoints := endpointsToEndpointsMap(tc.newEndpoints, "host") - - if len(newEndpoints) != len(tc.expected) { - t.Errorf("[%d] expected %d new, got %d: %v", tci, len(tc.expected), len(newEndpoints), spew.Sdump(newEndpoints)) - } - for x := range tc.expected { - if len(newEndpoints[x]) != len(tc.expected[x]) { - t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(tc.expected[x]), x, len(newEndpoints[x])) - } else { - for i := range newEndpoints[x] { - if *(newEndpoints[x][i]) != *(tc.expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, tc.expected[x][i], *(newEndpoints[x][i])) - } + if *newEp != *(expected[x][i]) { + t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp) } } } From b7dbaab96a6f9daa8ef53ba24f565a13921bece4 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 9 Feb 2018 17:26:22 +0800 Subject: [PATCH 19/53] update bazel BUILD --- pkg/proxy/BUILD | 30 +++++++++++++++++++++++++++++- pkg/proxy/iptables/BUILD | 3 --- pkg/proxy/ipvs/BUILD | 1 - 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/pkg/proxy/BUILD b/pkg/proxy/BUILD index 597c49c4654..82053ce2532 100644 --- a/pkg/proxy/BUILD +++ b/pkg/proxy/BUILD @@ -3,16 +3,25 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = [ "doc.go", + "endpoints.go", + "service.go", "types.go", ], importpath = "k8s.io/kubernetes/pkg/proxy", - deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/proxy/util:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) filegroup( @@ -39,3 +48,22 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = [ + "endpoints_test.go", + "service_test.go", + ], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/proxy", + deps = [ + "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) diff --git a/pkg/proxy/iptables/BUILD b/pkg/proxy/iptables/BUILD index 18fb3c29e81..9a27ec12d1c 100644 --- a/pkg/proxy/iptables/BUILD +++ b/pkg/proxy/iptables/BUILD @@ -27,7 +27,6 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -46,12 +45,10 @@ go_test( "//pkg/util/async:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/iptables/testing:go_default_library", - 
"//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 0ba9f0e3720..aaf560a062c 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -25,7 +25,6 @@ go_test( "//pkg/util/iptables/testing:go_default_library", "//pkg/util/ipvs:go_default_library", "//pkg/util/ipvs/testing:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", From 3ecc49daaa344d3e0d90ac73adfed1c47536db63 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Wed, 31 Jan 2018 22:50:58 +0200 Subject: [PATCH 20/53] Add HTTPProxyCheck for API servers It makes sense to check all API servers mentioned in the command line and print warnings if they're going to be accessed through proxy. This is similar to what's already done for 'kubeadm init'. --- cmd/kubeadm/app/preflight/checks.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 9add920416f..3c1e12e671b 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -999,19 +999,27 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura criCtlChecker) } + var bridgenf6Check Checker for _, server := range cfg.DiscoveryTokenAPIServers { ipstr, _, err := net.SplitHostPort(server) if err == nil { - if ip := net.ParseIP(ipstr); ip != nil { - if ip.To4() == nil && ip.To16() != nil { - checks = append(checks, - FileContentCheck{Path: bridgenf6, Content: []byte{'1'}}, - ) - break // Ensure that check is added only once + checks = append(checks, + HTTPProxyCheck{Proto: "https", Host: ipstr}, + ) + if bridgenf6Check == nil { + if ip := net.ParseIP(ipstr); ip != nil { + if ip.To4() == nil && ip.To16() != nil { + // This check should be added only once + bridgenf6Check = FileContentCheck{Path: bridgenf6, Content: []byte{'1'}} + } } } } } + if bridgenf6Check != nil { + checks = append(checks, bridgenf6Check) + } + return RunChecks(checks, os.Stderr, ignorePreflightErrors) } From 59d131d6124c8fac6400c05be4d74a82af5fd63d Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Fri, 9 Feb 2018 15:33:43 +0200 Subject: [PATCH 21/53] Remove bootstrap kubelet config on reset Made sure /etc/kubernetes/bootstrap-kubelet.conf is removed to ensure it will not be reused when joining the cluster next time. 
--- cmd/kubeadm/app/cmd/reset.go | 1 + cmd/kubeadm/app/preflight/checks.go | 1 + 2 files changed, 2 insertions(+) diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 6b43088895c..2e950fa87e7 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -252,6 +252,7 @@ func resetConfigDir(configPathDir, pkiPathDir string) { filesToClean := []string{ filepath.Join(configPathDir, kubeadmconstants.AdminKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.KubeletKubeConfigFileName), + filepath.Join(configPathDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.ControllerManagerKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.SchedulerKubeConfigFileName), } diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 9add920416f..68ace4f0a66 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -975,6 +975,7 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura DirAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName)}, FileAvailableCheck{Path: cfg.CACertPath}, FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)}, + FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName)}, } if useCRI { checks = append(checks, CRICheck{socket: criSocket, exec: execer}) From 973583e781b4c9cf1409d6fd801264d3853ed00b Mon Sep 17 00:00:00 2001 From: mtanino Date: Tue, 6 Feb 2018 15:44:07 -0500 Subject: [PATCH 22/53] Refactor volumehandler in operationexecutor --- .../volumemanager/reconciler/reconciler.go | 48 +-- .../reconciler/reconciler_test.go | 39 +- .../operationexecutor/operation_executor.go | 406 ++++++------------ .../operation_executor_test.go | 29 +- .../operationexecutor/operation_generator.go | 4 +- pkg/volume/util/volumehelper/volumehelper.go | 17 + 6 files changed, 229 insertions(+), 314 deletions(-) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index f7293c6d76d..7d6846fcbc0 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -166,12 +166,10 @@ func (rc *reconciler) reconcile() { // Ensure volumes that should be unmounted are unmounted. 
for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() { if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) { - volumeHandler, err := operationexecutor.NewVolumeHandler(mountedVolume.VolumeSpec, rc.operationExecutor) - if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error()) - continue - } - err = volumeHandler.UnmountVolumeHandler(mountedVolume.MountedVolume, rc.actualStateOfWorld) + // Volume is mounted, unmount it + glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) + err := rc.operationExecutor.UnmountVolume( + mountedVolume.MountedVolume, rc.actualStateOfWorld) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -236,12 +234,12 @@ func (rc *reconciler) reconcile() { if isRemount { remountingLogStr = "Volume is already mounted to pod, but remount was requested." } - volumeHandler, err := operationexecutor.NewVolumeHandler(volumeToMount.VolumeSpec, rc.operationExecutor) - if err != nil { - glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for MountVolume failed"), err).Error()) - continue - } - err = volumeHandler.MountVolumeHandler(rc.waitForAttachTimeout, volumeToMount.VolumeToMount, rc.actualStateOfWorld, isRemount, remountingLogStr) + glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) + err := rc.operationExecutor.MountVolume( + rc.waitForAttachTimeout, + volumeToMount.VolumeToMount, + rc.actualStateOfWorld, + isRemount) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -265,12 +263,10 @@ func (rc *reconciler) reconcile() { if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) && !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) { if attachedVolume.GloballyMounted { - volumeHandler, err := operationexecutor.NewVolumeHandler(attachedVolume.VolumeSpec, rc.operationExecutor) - if err != nil { - glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountDevice failed"), err).Error()) - continue - } - err = volumeHandler.UnmountDeviceHandler(attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) + // Volume is globally mounted to device, unmount it + glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) + err := rc.operationExecutor.UnmountDevice( + attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -403,14 +399,9 @@ func (rc *reconciler) cleanupMounts(volume podVolume) { PluginName: volume.pluginName, PodUID: types.UID(volume.podName), } - volumeHandler, err := operationexecutor.NewVolumeHandlerWithMode(volume.volumeMode, rc.operationExecutor) - if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error()) - return - } // TODO: Currently cleanupMounts only includes UnmountVolume operation. In the next PR, we will add // to unmount both volume and device in the same routine. 
- err = volumeHandler.UnmountVolumeHandler(mountedVolume, rc.actualStateOfWorld) + err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld) if err != nil { glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) return @@ -435,15 +426,12 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, UID: types.UID(volume.podName), }, } - volumeHandler, err := operationexecutor.NewVolumeHandlerWithMode(volume.volumeMode, rc.operationExecutor) - if err != nil { - return nil, err - } mapperPlugin, err := rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName) if err != nil { return nil, err } - volumeSpec, err := volumeHandler.ReconstructVolumeHandler( + volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation( + volume.volumeMode, plugin, mapperPlugin, pod.UID, @@ -466,7 +454,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec) } // Check existence of mount point for filesystem volume or symbolic link for block volume - isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin) + isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin) if checkErr != nil { return nil, err } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index 187171ebda9..22b49d02485 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -835,6 +835,8 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) { }, } + // Enable BlockVolume feature gate + utilfeature.DefaultFeatureGate.Set("BlockVolume=true") for name, tc := range testCases { t.Run(name, func(t *testing.T) { volumePluginMgr := &volume.VolumePluginMgr{} @@ -854,14 +856,20 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) { }, Spec: v1.PodSpec{}, } - volumeToMount := operationexecutor.VolumeToMount{Pod: pod, VolumeSpec: &volume.Spec{}} - err := oex.MapVolume(waitForAttachTimeout, volumeToMount, asw) + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} + volumeToMount := operationexecutor.VolumeToMount{ + Pod: pod, + VolumeSpec: tmpSpec} + err := oex.MountVolume(waitForAttachTimeout, volumeToMount, asw, false) // Assert if assert.Error(t, err) { assert.Contains(t, err.Error(), tc.expectedErrMsg) } }) } + // Rollback feature gate to false. 
+ utilfeature.DefaultFeatureGate.Set("BlockVolume=false") } func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) { @@ -882,6 +890,8 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) { }, } + // Enable BlockVolume feature gate + utilfeature.DefaultFeatureGate.Set("BlockVolume=true") for name, tc := range testCases { t.Run(name, func(t *testing.T) { volumePluginMgr := &volume.VolumePluginMgr{} @@ -893,14 +903,20 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) { nil, /* fakeRecorder */ false, /* checkNodeCapabilitiesBeforeMount */ nil)) - volumeToUnmount := operationexecutor.MountedVolume{PluginName: "fake-file-plugin"} - err := oex.UnmapVolume(volumeToUnmount, asw) + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} + volumeToUnmount := operationexecutor.MountedVolume{ + PluginName: "fake-file-plugin", + VolumeSpec: tmpSpec} + err := oex.UnmountVolume(volumeToUnmount, asw) // Assert if assert.Error(t, err) { assert.Contains(t, err.Error(), tc.expectedErrMsg) } }) } + // Rollback feature gate to false. + utilfeature.DefaultFeatureGate.Set("BlockVolume=false") } func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { @@ -912,15 +928,17 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { "volumePlugin is nil": { volumePlugins: []volume.VolumePlugin{}, expectErr: true, - expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed", + expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed", }, "blockVolumePlugin is nil": { volumePlugins: volumetesting.NewFakeFileVolumePlugin(), expectErr: true, - expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", + expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.", }, } + // Enable BlockVolume feature gate + utilfeature.DefaultFeatureGate.Set("BlockVolume=true") for name, tc := range testCases { t.Run(name, func(t *testing.T) { volumePluginMgr := &volume.VolumePluginMgr{} @@ -933,15 +951,18 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { false, /* checkNodeCapabilitiesBeforeMount */ nil)) var mounter mount.Interface - plugins := volumetesting.NewFakeFileVolumePlugin() - deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: &volume.Spec{}, PluginName: plugins[0].GetPluginName()} - err := oex.UnmapDevice(deviceToDetach, asw, mounter) + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} + deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: tmpSpec, PluginName: "fake-file-plugin"} + err := oex.UnmountDevice(deviceToDetach, asw, mounter) // Assert if assert.Error(t, err) { assert.Contains(t, err.Error(), tc.expectedErrMsg) } }) } + // Rollback feature gate to false. 
+ utilfeature.DefaultFeatureGate.Set("BlockVolume=false") } func waitForMount( diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index dec31299efd..486add34da3 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -28,9 +28,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -83,7 +81,8 @@ type OperationExecutor interface { // Status.VolumesInUse list (operation fails with error if it is). DetachVolume(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error - // MountVolume mounts the volume to the pod specified in volumeToMount. + // If a volume has 'Filesystem' volumeMode, MountVolume mounts the + // volume to the pod specified in volumeToMount. // Specifically it will: // * Wait for the device to finish attaching (for attachable volumes only). // * Mount device to global mount path (for attachable volumes only). @@ -95,38 +94,36 @@ type OperationExecutor interface { // The parameter "isRemount" is informational and used to adjust logging // verbosity. An initial mount is more log-worthy than a remount, for // example. - MountVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error - - // UnmountVolume unmounts the volume from the pod specified in - // volumeToUnmount and updates the actual state of the world to reflect that. - UnmountVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error - - // UnmountDevice unmounts the volumes global mount path from the device (for - // attachable volumes only, freeing it for detach. It then updates the - // actual state of the world to reflect that. - UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error - - // MapVolume is used when the volumeMode is 'Block'. - // This method creates a symbolic link to the volume from both the pod - // specified in volumeToMount and global map path. + // + // For 'Block' volumeMode, this method creates a symbolic link to + // the volume from both the pod specified in volumeToMount and global map path. // Specifically it will: // * Wait for the device to finish attaching (for attachable volumes only). // * Update actual state of world to reflect volume is globally mounted/mapped. // * Map volume to global map path using symbolic link. // * Map the volume to the pod device map path using symbolic link. // * Update actual state of world to reflect volume is mounted/mapped to the pod path. - MapVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error + MountVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error - // UnmapVolume unmaps symbolic link to the volume from both the pod device - // map path in volumeToUnmount and global map path. 
+	// If a volume has 'Filesystem' volumeMode, UnmountVolume unmounts the
+	// volume from the pod specified in volumeToUnmount and updates the actual
+	// state of the world to reflect that.
+	//
+	// For 'Block' volumeMode, this method unmaps the symbolic links to the volume
+	// from both the pod device map path in volumeToUnmount and the global map path.
 	// It then updates the actual state of the world to reflect that.
-	UnmapVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
+	UnmountVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error

-	// UnmapDevice checks number of symbolic links under global map path.
-	// If number of reference is zero, remove global map path directory and
-	// free a volume for detach.
+	// If a volume has 'Filesystem' volumeMode, UnmountDevice unmounts the
+	// volume's global mount path from the device (for attachable volumes only),
+	// freeing it for detach. It then updates the actual state of the world to
+	// reflect that.
+	//
+	// For 'Block' volumeMode, this method checks the number of symbolic links under
+	// the global map path. If the reference count is zero, it removes the global
+	// map path directory and frees the volume for detach.
 	// It then updates the actual state of the world to reflect that.
-	UnmapDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error
+	UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error

 	// VerifyControllerAttachedVolume checks if the specified volume is present
 	// in the specified nodes AttachedVolumes Status field. It uses kubeClient
@@ -145,6 +142,10 @@ type OperationExecutor interface {
 	IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool
 	// Expand Volume will grow size available to PVC
 	ExpandVolume(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) error
+	// ReconstructVolumeOperation constructs a volumeSpec from the given mount path via the plugin and returns it
+	ReconstructVolumeOperation(volumeMode v1.PersistentVolumeMode, plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error)
+	// CheckVolumeExistenceOperation checks whether the volume still exists at the given mount path
+	CheckVolumeExistenceOperation(volumeSpec *volume.Spec, mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error)
 }

// NewOperationExecutor returns a new instance of OperationExecutor.
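With the interface merged this way, callers no longer branch on volume mode themselves; the mode travels inside the volume.Spec and the executor dispatches internally. A minimal caller-side sketch of the new entry point (oe, pod, actualStateOfWorld, and waitForAttachTimeout are placeholders, and error handling is elided):

    // The same MountVolume call now covers both volume modes.
    blockMode := v1.PersistentVolumeBlock
    spec := &volume.Spec{
        PersistentVolume: &v1.PersistentVolume{
            Spec: v1.PersistentVolumeSpec{VolumeMode: &blockMode},
        },
    }
    volumeToMount := operationexecutor.VolumeToMount{
        Pod:        pod, // placeholder *v1.Pod
        VolumeSpec: spec,
    }
    // Internally this dispatches to GenerateMountVolumeFunc (filesystem)
    // or GenerateMapVolumeFunc (block) based on the spec's volumeMode.
    err := oe.MountVolume(waitForAttachTimeout, volumeToMount, actualStateOfWorld, false /* isRemount */)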
@@ -707,13 +708,30 @@ func (oe *operationExecutor) MountVolume( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error { - generatedOperations, err := oe.operationGenerator.GenerateMountVolumeFunc( - waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) + fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec) if err != nil { return err } + var generatedOperations volumetypes.GeneratedOperations + if fsVolume { + // Filesystem volume case + // Mount/remount a volume when a volume is attached + generatedOperations, err = oe.operationGenerator.GenerateMountVolumeFunc( + waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) + } else { + // Block volume case + // Creates a map to device if a volume is attached + generatedOperations, err = oe.operationGenerator.GenerateMapVolumeFunc( + waitForAttachTimeout, volumeToMount, actualStateOfWorld) + } + if err != nil { + return err + } + // Avoid executing mount/map from multiple pods referencing the + // same volume in parallel podName := nestedpendingoperations.EmptyUniquePodName + // TODO: remove this -- not necessary if !volumeToMount.PluginIsAttachable { // Non-attachable volume plugins can execute mount for multiple pods @@ -729,14 +747,26 @@ func (oe *operationExecutor) MountVolume( func (oe *operationExecutor) UnmountVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - - generatedOperations, err := - oe.operationGenerator.GenerateUnmountVolumeFunc(volumeToUnmount, actualStateOfWorld) + fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec) if err != nil { return err } - - // All volume plugins can execute mount for multiple pods referencing the + var generatedOperations volumetypes.GeneratedOperations + if fsVolume { + // Filesystem volume case + // Unmount a volume if a volume is mounted + generatedOperations, err = oe.operationGenerator.GenerateUnmountVolumeFunc( + volumeToUnmount, actualStateOfWorld) + } else { + // Block volume case + // Unmap a volume if a volume is mapped + generatedOperations, err = oe.operationGenerator.GenerateUnmapVolumeFunc( + volumeToUnmount, actualStateOfWorld) + } + if err != nil { + return err + } + // All volume plugins can execute unmount/unmap for multiple pods referencing the // same volume in parallel podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) @@ -748,14 +778,31 @@ func (oe *operationExecutor) UnmountDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - generatedOperations, err := - oe.operationGenerator.GenerateUnmountDeviceFunc(deviceToDetach, actualStateOfWorld, mounter) + fsVolume, err := volumehelper.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec) if err != nil { return err } + var generatedOperations volumetypes.GeneratedOperations + if fsVolume { + // Filesystem volume case + // Unmount and detach a device if a volume isn't referenced + generatedOperations, err = oe.operationGenerator.GenerateUnmountDeviceFunc( + deviceToDetach, actualStateOfWorld, mounter) + } else { + // Block volume case + // Detach a device and remove loopback if a volume isn't referenced + generatedOperations, err = oe.operationGenerator.GenerateUnmapDeviceFunc( + deviceToDetach, actualStateOfWorld, mounter) + } + if err != nil { + return err + } + // Avoid executing unmount/unmap device from multiple pods referencing + // the same volume in parallel + 
podName := nestedpendingoperations.EmptyUniquePodName return oe.pendingOperations.Run( - deviceToDetach.VolumeName, "" /* podName */, generatedOperations) + deviceToDetach.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCWithResizeRequest, resizeMap expandcache.VolumeResizeMap) error { @@ -769,65 +816,6 @@ func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCW return oe.pendingOperations.Run(uniqueVolumeKey, "", generatedOperations) } -func (oe *operationExecutor) MapVolume( - waitForAttachTimeout time.Duration, - volumeToMount VolumeToMount, - actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - generatedOperations, err := oe.operationGenerator.GenerateMapVolumeFunc( - waitForAttachTimeout, volumeToMount, actualStateOfWorld) - if err != nil { - return err - } - - // Avoid executing map from multiple pods referencing the - // same volume in parallel - podName := nestedpendingoperations.EmptyUniquePodName - // TODO: remove this -- not necessary - if !volumeToMount.PluginIsAttachable { - // Non-attachable volume plugins can execute mount for multiple pods - // referencing the same volume in parallel - podName = volumehelper.GetUniquePodName(volumeToMount.Pod) - } - - return oe.pendingOperations.Run( - volumeToMount.VolumeName, podName, generatedOperations) -} - -func (oe *operationExecutor) UnmapVolume( - volumeToUnmount MountedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - generatedOperations, err := - oe.operationGenerator.GenerateUnmapVolumeFunc(volumeToUnmount, actualStateOfWorld) - if err != nil { - return err - } - - // All volume plugins can execute unmap for multiple pods referencing the - // same volume in parallel - podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) - - return oe.pendingOperations.Run( - volumeToUnmount.VolumeName, podName, generatedOperations) -} - -func (oe *operationExecutor) UnmapDevice( - deviceToDetach AttachedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater, - mounter mount.Interface) error { - generatedOperations, err := - oe.operationGenerator.GenerateUnmapDeviceFunc(deviceToDetach, actualStateOfWorld, mounter) - if err != nil { - return err - } - - // Avoid executing unmap device from multiple pods referencing - // the same volume in parallel - podName := nestedpendingoperations.EmptyUniquePodName - - return oe.pendingOperations.Run( - deviceToDetach.VolumeName, podName, generatedOperations) -} - func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, @@ -842,177 +830,30 @@ func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount.VolumeName, "" /* podName */, generatedOperations) } -// VolumeStateHandler defines a set of operations for handling mount/unmount/detach/reconstruct volume-related operations -type VolumeStateHandler interface { - // Volume is attached, mount/map it - MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool, remountingLogStr string) error - // Volume is mounted/mapped, unmount/unmap it - UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error - // Volume is not referenced from pod, unmount/unmap and detach it - UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error - // 
Reconstruct volume from mount path
-	ReconstructVolumeHandler(plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error)
-	// check mount path if volume still exists
-	CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error)
-}
+// ReconstructVolumeOperation reconstructs a volumeSpec from the given mount path and returns it
+func (oe *operationExecutor) ReconstructVolumeOperation(
+	volumeMode v1.PersistentVolumeMode,
+	plugin volume.VolumePlugin,
+	mapperPlugin volume.BlockVolumePlugin,
+	uid types.UID,
+	podName volumetypes.UniquePodName,
+	volumeSpecName string,
+	mountPath string,
+	pluginName string) (*volume.Spec, error) {

-// NewVolumeHandler return a new instance of volumeHandler depens on a volumeMode
-func NewVolumeHandler(volumeSpec *volume.Spec, oe OperationExecutor) (VolumeStateHandler, error) {
-
-	// TODO: remove feature gate check after no longer needed
-	var volumeHandler VolumeStateHandler
-	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		volumeMode, err := volumehelper.GetVolumeMode(volumeSpec)
+	// Filesystem Volume case
+	if volumeMode == v1.PersistentVolumeFilesystem {
+		// Create volumeSpec from mount path
+		glog.V(12).Infof("Starting operationExecutor.ReconstructVolume")
+		volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath)
 		if err != nil {
 			return nil, err
 		}
-		if volumeMode == v1.PersistentVolumeFilesystem {
-			volumeHandler = NewFilesystemVolumeHandler(oe)
-		} else {
-			volumeHandler = NewBlockVolumeHandler(oe)
-		}
-	} else {
-		volumeHandler = NewFilesystemVolumeHandler(oe)
+		return volumeSpec, nil
 	}
-	return volumeHandler, nil
-}

-// NewVolumeHandlerWithMode return a new instance of volumeHandler depens on a volumeMode
-func NewVolumeHandlerWithMode(volumeMode v1.PersistentVolumeMode, oe OperationExecutor) (VolumeStateHandler, error) {
-	var volumeHandler VolumeStateHandler
-	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		if volumeMode == v1.PersistentVolumeFilesystem {
-			volumeHandler = NewFilesystemVolumeHandler(oe)
-		} else {
-			volumeHandler = NewBlockVolumeHandler(oe)
-		}
-	} else {
-		volumeHandler = NewFilesystemVolumeHandler(oe)
-	}
-	return volumeHandler, nil
-}
-
-// NewFilesystemVolumeHandler returns a new instance of FilesystemVolumeHandler.
-func NewFilesystemVolumeHandler(operationExecutor OperationExecutor) FilesystemVolumeHandler {
-	return FilesystemVolumeHandler{
-		oe: operationExecutor}
-}
-
-// NewBlockVolumeHandler returns a new instance of BlockVolumeHandler.
-func NewBlockVolumeHandler(operationExecutor OperationExecutor) BlockVolumeHandler { - return BlockVolumeHandler{ - oe: operationExecutor} -} - -// FilesystemVolumeHandler is VolumeHandler for Filesystem volume -type FilesystemVolumeHandler struct { - oe OperationExecutor -} - -// BlockVolumeHandler is VolumeHandler for Block volume -type BlockVolumeHandler struct { - oe OperationExecutor -} - -// MountVolumeHandler mount/remount a volume when a volume is attached -// This method is handler for filesystem volume -func (f FilesystemVolumeHandler) MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool, remountingLogStr string) error { - glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) - err := f.oe.MountVolume( - waitForAttachTimeout, - volumeToMount, - actualStateOfWorld, - isRemount) - return err -} - -// UnmountVolumeHandler unmount a volume if a volume is mounted -// This method is handler for filesystem volume -func (f FilesystemVolumeHandler) UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) - err := f.oe.UnmountVolume( - mountedVolume, - actualStateOfWorld) - return err -} - -// UnmountDeviceHandler unmount and detach a device if a volume isn't referenced -// This method is handler for filesystem volume -func (f FilesystemVolumeHandler) UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) - err := f.oe.UnmountDevice( - attachedVolume, - actualStateOfWorld, - mounter) - return err -} - -// ReconstructVolumeHandler create volumeSpec from mount path -// This method is handler for filesystem volume -func (f FilesystemVolumeHandler) ReconstructVolumeHandler(plugin volume.VolumePlugin, _ volume.BlockVolumePlugin, _ types.UID, _ volumetypes.UniquePodName, volumeSpecName string, mountPath string, _ string) (*volume.Spec, error) { - glog.V(4).Infof("Starting operationExecutor.ReconstructVolumepodName volume spec name %s, mount path %s", volumeSpecName, mountPath) - volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath) - if err != nil { - return nil, err - } - return volumeSpec, nil -} - -// CheckVolumeExistence checks mount path directory if volume still exists, return true if volume is there -// Also return true for non-attachable volume case without mount point check -// This method is handler for filesystem volume -func (f FilesystemVolumeHandler) CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error) { - if attachable != nil { - var isNotMount bool - var mountCheckErr error - if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil { - return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v", - uniqueVolumeName, - volumeName, - podName, - podUID, - mountCheckErr) - } - return !isNotMount, nil - } - return true, nil -} - -// MountVolumeHandler creates a map to device if a volume is attached -// This method is 
handler for block volume
-func (b BlockVolumeHandler) MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, _ bool, _ string) error {
-	glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MapVolume", ""))
-	err := b.oe.MapVolume(
-		waitForAttachTimeout,
-		volumeToMount,
-		actualStateOfWorld)
-	return err
-}
-
-// UnmountVolumeHandler unmap a volume if a volume is mapped
-// This method is handler for block volume
-func (b BlockVolumeHandler) UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
-	glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmapVolume", ""))
-	err := b.oe.UnmapVolume(
-		mountedVolume,
-		actualStateOfWorld)
-	return err
-}
-
-// UnmountDeviceHandler detach a device and remove loopback if a volume isn't referenced
-// This method is handler for block volume
-func (b BlockVolumeHandler) UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error {
-	glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmapDevice", ""))
-	err := b.oe.UnmapDevice(
-		attachedVolume,
-		actualStateOfWorld,
-		mounter)
-	return err
-}
-
-// ReconstructVolumeHandler create volumeSpec from mount path
-// This method is handler for block volume
-func (b BlockVolumeHandler) ReconstructVolumeHandler(_ volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error) {
+	// Block Volume case
+	// Create volumeSpec from mount path
 	glog.V(12).Infof("Starting operationExecutor.ReconstructVolume")
 	if mapperPlugin == nil {
 		return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)",
@@ -1031,12 +872,47 @@ func (b BlockVolumeHandler) ReconstructVolumeHandler(_ volume.VolumePlugin, mapp
 	return volumeSpec, nil
 }

-// CheckVolumeExistence checks mount path directory if volume still exists, then return
-// true if volume is there. Either plugin is attachable or non-attachable, the plugin
-// should have symbolic link associated to raw block device under pod device map
-// if volume exists.
-// This method is handler for block volume
-func (b BlockVolumeHandler) CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, _ volume.AttachableVolumePlugin) (bool, error) {
+// CheckVolumeExistenceOperation checks the mount path directory to determine whether the volume still exists
+func (oe *operationExecutor) CheckVolumeExistenceOperation(
+	volumeSpec *volume.Spec,
+	mountPath, volumeName string,
+	mounter mount.Interface,
+	uniqueVolumeName v1.UniqueVolumeName,
+	podName volumetypes.UniquePodName,
+	podUID types.UID,
+	attachable volume.AttachableVolumePlugin) (bool, error) {
+	fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeSpec)
+	if err != nil {
+		return false, err
+	}
+
+	// Filesystem Volume case
+	// For the attachable volume case, check the mount path directory to see whether
+	// the volume is still mounted. Return true if it is.
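+	// (When the BlockVolume feature gate is disabled, CheckVolumeModeFilesystem
+	// reports every spec as a filesystem volume, so only this branch runs.)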
+ if fsVolume { + if attachable != nil { + var isNotMount bool + var mountCheckErr error + if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil { + return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v", + uniqueVolumeName, + volumeName, + podName, + podUID, + mountCheckErr) + } + return !isNotMount, nil + } + // For non-attachable volume case, skip check and return true without mount point check + // since plugins may not have volume mount point. + return true, nil + } + + // Block Volume case + // Check mount path directory if volume still exists, then return true if volume + // is there. Either plugin is attachable or non-attachable, the plugin should + // have symbolic link associated to raw block device under pod device map + // if volume exists. blkutil := util.NewBlockVolumePathHandler() var islinkExist bool var checkErr error diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index 6aabd54b1fb..2453a6e630f 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -231,12 +231,14 @@ func TestOperationExecutor_VerifyControllerAttachedVolumeConcurrently(t *testing } } -func TestOperationExecutor_MapVolume_ConcurrentMapForNonAttachablePlugins(t *testing.T) { +func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins_VolumeMode_Block(t *testing.T) { // Arrange ch, quit, oe := setup() volumesToMount := make([]VolumeToMount, numVolumesToMap) secretName := "secret-volume" volumeName := v1.UniqueVolumeName(secretName) + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} // Act for i := range volumesToMount { @@ -247,8 +249,9 @@ func TestOperationExecutor_MapVolume_ConcurrentMapForNonAttachablePlugins(t *tes VolumeName: volumeName, PluginIsAttachable: false, // this field determines whether the plugin is attachable ReportedInUse: true, + VolumeSpec: tmpSpec, } - oe.MapVolume(0 /* waitForAttachTimeOut */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */) + oe.MountVolume(0 /* waitForAttachTimeOut */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false) } // Assert @@ -257,12 +260,14 @@ func TestOperationExecutor_MapVolume_ConcurrentMapForNonAttachablePlugins(t *tes } } -func TestOperationExecutor_MapVolume_ConcurrentMapForAttachablePlugins(t *testing.T) { +func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins_VolumeMode_Block(t *testing.T) { // Arrange ch, quit, oe := setup() volumesToMount := make([]VolumeToMount, numVolumesToAttach) pdName := "pd-volume" volumeName := v1.UniqueVolumeName(pdName) + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} // Act for i := range volumesToMount { @@ -273,8 +278,9 @@ func TestOperationExecutor_MapVolume_ConcurrentMapForAttachablePlugins(t *testin VolumeName: volumeName, PluginIsAttachable: true, // this field determines whether the plugin is attachable ReportedInUse: true, + VolumeSpec: tmpSpec, } - oe.MapVolume(0 /* waitForAttachTimeout */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */) + oe.MountVolume(0 /* waitForAttachTimeout */, volumesToMount[i], nil /* 
actualStateOfWorldMounterUpdater */, false) } // Assert @@ -283,12 +289,14 @@ func TestOperationExecutor_MapVolume_ConcurrentMapForAttachablePlugins(t *testin } } -func TestOperationExecutor_UnmapVolume_ConcurrentUnmapForAllPlugins(t *testing.T) { +func TestOperationExecutor_UnmountVolume_ConcurrentUnmountForAllPlugins_VolumeMode_Block(t *testing.T) { // Arrange ch, quit, oe := setup() volumesToUnmount := make([]MountedVolume, numAttachableVolumesToUnmap+numNonAttachableVolumesToUnmap) pdName := "pd-volume" secretName := "secret-volume" + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} // Act for i := 0; i < numNonAttachableVolumesToUnmap+numAttachableVolumesToUnmap; i++ { @@ -299,6 +307,7 @@ func TestOperationExecutor_UnmapVolume_ConcurrentUnmapForAllPlugins(t *testing.T PodName: volumetypes.UniquePodName(podName), VolumeName: v1.UniqueVolumeName(secretName), PodUID: pod.UID, + VolumeSpec: tmpSpec, } } else { pod := getTestPodWithGCEPD(podName, pdName) @@ -306,9 +315,10 @@ func TestOperationExecutor_UnmapVolume_ConcurrentUnmapForAllPlugins(t *testing.T PodName: volumetypes.UniquePodName(podName), VolumeName: v1.UniqueVolumeName(pdName), PodUID: pod.UID, + VolumeSpec: tmpSpec, } } - oe.UnmapVolume(volumesToUnmount[i], nil /* actualStateOfWorldMounterUpdater */) + oe.UnmountVolume(volumesToUnmount[i], nil /* actualStateOfWorldMounterUpdater */) } // Assert @@ -317,19 +327,22 @@ func TestOperationExecutor_UnmapVolume_ConcurrentUnmapForAllPlugins(t *testing.T } } -func TestOperationExecutor_UnmapDeviceConcurrently(t *testing.T) { +func TestOperationExecutor_UnmountDeviceConcurrently_VolumeMode_Block(t *testing.T) { // Arrange ch, quit, oe := setup() attachedVolumes := make([]AttachedVolume, numDevicesToUnmap) pdName := "pd-volume" + volumeMode := v1.PersistentVolumeBlock + tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}} // Act for i := range attachedVolumes { attachedVolumes[i] = AttachedVolume{ VolumeName: v1.UniqueVolumeName(pdName), NodeName: "node-name", + VolumeSpec: tmpSpec, } - oe.UnmapDevice(attachedVolumes[i], nil /* actualStateOfWorldMounterUpdater */, nil /* mount.Interface */) + oe.UnmountDevice(attachedVolumes[i], nil /* actualStateOfWorldMounterUpdater */, nil /* mount.Interface */) } // Assert diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 848c3b87087..71d71ae5ede 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -1017,10 +1017,10 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( blockVolumePlugin, err := og.volumePluginMgr.FindMapperPluginByName(deviceToDetach.PluginName) if err != nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed", err) } if blockVolumePlugin == nil { - return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) } blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( diff --git a/pkg/volume/util/volumehelper/volumehelper.go b/pkg/volume/util/volumehelper/volumehelper.go index 74b14be5de8..d8c83540123 100644 --- a/pkg/volume/util/volumehelper/volumehelper.go +++ b/pkg/volume/util/volumehelper/volumehelper.go @@ -23,6 +23,8 @@ import ( "strings" "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/types" @@ -157,3 +159,18 @@ func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.Per } return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name) } + +// CheckVolumeModeFilesystem checks VolumeMode. +// If the mode is Filesystem, return true otherwise return false. +func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) { + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := GetVolumeMode(volumeSpec) + if err != nil { + return true, err + } + if volumeMode == v1.PersistentVolumeBlock { + return false, nil + } + } + return true, nil +} From bc86537f184da3b6344816578c05b73a9a8ab5dc Mon Sep 17 00:00:00 2001 From: mtanino Date: Fri, 9 Feb 2018 14:09:32 -0500 Subject: [PATCH 23/53] Autogenerated files --- pkg/volume/util/volumehelper/BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/volume/util/volumehelper/BUILD b/pkg/volume/util/volumehelper/BUILD index 3a57c7dbdfb..507792a9636 100644 --- a/pkg/volume/util/volumehelper/BUILD +++ b/pkg/volume/util/volumehelper/BUILD @@ -10,10 +10,12 @@ go_library( srcs = ["volumehelper.go"], importpath = "k8s.io/kubernetes/pkg/volume/util/volumehelper", deps = [ + "//pkg/features:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util/types:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) From 69d62a9288dc3d302a66febfb7c6c3714415404a Mon Sep 17 00:00:00 2001 From: "Bobby (Babak) Salamat" Date: Thu, 8 Feb 2018 18:19:31 -0800 Subject: [PATCH 24/53] Improve performance of scheduling queue by adding a hash map to track all pods in with a nominatedNodeName. --- pkg/scheduler/core/scheduling_queue.go | 159 ++++++++-------- pkg/scheduler/core/scheduling_queue_test.go | 197 +++++++++++++------- pkg/scheduler/factory/factory.go | 2 +- 3 files changed, 212 insertions(+), 146 deletions(-) diff --git a/pkg/scheduler/core/scheduling_queue.go b/pkg/scheduler/core/scheduling_queue.go index 37f31dadefd..21bbaf23af6 100644 --- a/pkg/scheduler/core/scheduling_queue.go +++ b/pkg/scheduler/core/scheduling_queue.go @@ -51,7 +51,7 @@ type SchedulingQueue interface { AddIfNotPresent(pod *v1.Pod) error AddUnschedulableIfNotPresent(pod *v1.Pod) error Pop() (*v1.Pod, error) - Update(pod *v1.Pod) error + Update(oldPod, newPod *v1.Pod) error Delete(pod *v1.Pod) error MoveAllToActiveQueue() AssignedPodAdded(pod *v1.Pod) @@ -93,8 +93,8 @@ func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error { } // Update updates a pod in the FIFO. -func (f *FIFO) Update(pod *v1.Pod) error { - return f.FIFO.Update(pod) +func (f *FIFO) Update(oldPod, newPod *v1.Pod) error { + return f.FIFO.Update(newPod) } // Delete deletes a pod in the FIFO. 
@@ -139,6 +139,11 @@ func NewFIFO() *FIFO { return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)} } +// NominatedNodeName returns nominated node name of a Pod. +func NominatedNodeName(pod *v1.Pod) string { + return pod.Status.NominatedNodeName +} + // UnschedulablePods is an interface for a queue that is used to keep unschedulable // pods. These pods are not actively reevaluated for scheduling. They are moved // to the active scheduling queue on certain events, such as termination of a pod @@ -147,7 +152,6 @@ type UnschedulablePods interface { Add(pod *v1.Pod) Delete(pod *v1.Pod) Update(pod *v1.Pod) - GetPodsWaitingForNode(nodeName string) []*v1.Pod Get(pod *v1.Pod) *v1.Pod Clear() } @@ -167,6 +171,10 @@ type PriorityQueue struct { activeQ *Heap // unschedulableQ holds pods that have been tried and determined unschedulable. unschedulableQ *UnschedulablePodsMap + // nominatedPods is a map keyed by a node name and the value is a list of + // pods which are nominated to run on the node. These are pods which can be in + // the activeQ or unschedulableQ. + nominatedPods map[string][]*v1.Pod // receivedMoveRequest is set to true whenever we receive a request to move a // pod from the unschedulableQ to the activeQ, and is set to false, when we pop // a pod from the activeQ. It indicates if we received a move request when a @@ -183,11 +191,51 @@ func NewPriorityQueue() *PriorityQueue { pq := &PriorityQueue{ activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod), unschedulableQ: newUnschedulablePodsMap(), + nominatedPods: map[string][]*v1.Pod{}, } pq.cond.L = &pq.lock return pq } +// addNominatedPodIfNeeded adds a pod to nominatedPods if it has a NominatedNodeName and it does not +// already exist in the map. Adding an existing pod is not going to update the pod. +func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { + nnn := NominatedNodeName(pod) + if len(nnn) > 0 { + for _, p := range p.nominatedPods[nnn] { + if p.Name == pod.Name && p.Namespace == pod.Namespace { + glog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) + return + } + } + p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod) + } +} + +// deleteNominatedPodIfExists deletes a pod from the nominatedPods. +func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) { + nnn := NominatedNodeName(pod) + if len(nnn) > 0 { + for i, np := range p.nominatedPods[nnn] { + if np.Name == pod.Name && np.Namespace == pod.Namespace { + p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...) + if len(p.nominatedPods[nnn]) == 0 { + delete(p.nominatedPods, nnn) + } + break + } + } + } +} + +// updateNominatedPod updates a pod in the nominatedPods. +func (p *PriorityQueue) updateNominatedPod(oldPod, newPod *v1.Pod) { + // Even if the nominated node name of the Pod is not changed, we must delete and add it again + // to ensure that its pointer is updated. + p.deleteNominatedPodIfExists(oldPod) + p.addNominatedPodIfNeeded(newPod) +} + // Add adds a pod to the active queue. It should be called only when a new pod // is added so there is no chance the pod is already in either queue. 
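// Add also records the pod in the nominatedPods map when it carries a
// NominatedNodeName, so WaitingPodsForNode can look it up without scanning
// both queues.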
func (p *PriorityQueue) Add(pod *v1.Pod) error { @@ -199,8 +247,10 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error { } else { if p.unschedulableQ.Get(pod) != nil { glog.Errorf("Error: pod %v is already in the unschedulable queue.", pod.Name) + p.deleteNominatedPodIfExists(pod) p.unschedulableQ.Delete(pod) } + p.addNominatedPodIfNeeded(pod) p.cond.Broadcast() } return err @@ -221,6 +271,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { if err != nil { glog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) } else { + p.addNominatedPodIfNeeded(pod) p.cond.Broadcast() } return err @@ -245,10 +296,12 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { } if !p.receivedMoveRequest && isPodUnschedulable(pod) { p.unschedulableQ.Add(pod) + p.addNominatedPodIfNeeded(pod) return nil } err := p.activeQ.Add(pod) if err == nil { + p.addNominatedPodIfNeeded(pod) p.cond.Broadcast() } return err @@ -267,8 +320,10 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { if err != nil { return nil, err } + pod := obj.(*v1.Pod) + p.deleteNominatedPodIfExists(pod) p.receivedMoveRequest = false - return obj.(*v1.Pod), err + return pod, err } // isPodUpdated checks if the pod is updated in a way that it may have become @@ -287,30 +342,33 @@ func isPodUpdated(oldPod, newPod *v1.Pod) bool { // Update updates a pod in the active queue if present. Otherwise, it removes // the item from the unschedulable queue and adds the updated one to the active // queue. -func (p *PriorityQueue) Update(pod *v1.Pod) error { +func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { p.lock.Lock() defer p.lock.Unlock() // If the pod is already in the active queue, just update it there. - if _, exists, _ := p.activeQ.Get(pod); exists { - err := p.activeQ.Update(pod) + if _, exists, _ := p.activeQ.Get(newPod); exists { + p.updateNominatedPod(oldPod, newPod) + err := p.activeQ.Update(newPod) return err } // If the pod is in the unschedulable queue, updating it may make it schedulable. - if oldPod := p.unschedulableQ.Get(pod); oldPod != nil { - if isPodUpdated(oldPod, pod) { - p.unschedulableQ.Delete(oldPod) - err := p.activeQ.Add(pod) + if usPod := p.unschedulableQ.Get(newPod); usPod != nil { + p.updateNominatedPod(oldPod, newPod) + if isPodUpdated(oldPod, newPod) { + p.unschedulableQ.Delete(usPod) + err := p.activeQ.Add(newPod) if err == nil { p.cond.Broadcast() } return err } - p.unschedulableQ.Update(pod) + p.unschedulableQ.Update(newPod) return nil } // If pod is not in any of the two queue, we put it in the active queue. 
- err := p.activeQ.Add(pod) + err := p.activeQ.Add(newPod) if err == nil { + p.addNominatedPodIfNeeded(newPod) p.cond.Broadcast() } return err @@ -321,6 +379,7 @@ func (p *PriorityQueue) Update(pod *v1.Pod) error { func (p *PriorityQueue) Delete(pod *v1.Pod) error { p.lock.Lock() defer p.lock.Unlock() + p.deleteNominatedPodIfExists(pod) if _, exists, _ := p.activeQ.Get(pod); exists { return p.activeQ.Delete(pod) } @@ -403,68 +462,34 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod func (p *PriorityQueue) WaitingPodsForNode(nodeName string) []*v1.Pod { p.lock.RLock() defer p.lock.RUnlock() - pods := p.unschedulableQ.GetPodsWaitingForNode(nodeName) - for _, obj := range p.activeQ.List() { - pod := obj.(*v1.Pod) - if pod.Status.NominatedNodeName == nodeName { - pods = append(pods, pod) - } + if list, ok := p.nominatedPods[nodeName]; ok { + return list } - return pods + return nil } // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. type UnschedulablePodsMap struct { // pods is a map key by a pod's full-name and the value is a pointer to the pod. - pods map[string]*v1.Pod - // nominatedPods is a map keyed by a node name and the value is a list of - // pods' full-names which are nominated to run on the node. - nominatedPods map[string][]string - keyFunc func(*v1.Pod) string + pods map[string]*v1.Pod + keyFunc func(*v1.Pod) string } var _ = UnschedulablePods(&UnschedulablePodsMap{}) -// NominatedNodeName returns the nominated node name of a pod. -func NominatedNodeName(pod *v1.Pod) string { - return pod.Status.NominatedNodeName -} - // Add adds a pod to the unschedulable pods. func (u *UnschedulablePodsMap) Add(pod *v1.Pod) { podKey := u.keyFunc(pod) if _, exists := u.pods[podKey]; !exists { u.pods[podKey] = pod - nominatedNodeName := NominatedNodeName(pod) - if len(nominatedNodeName) > 0 { - u.nominatedPods[nominatedNodeName] = append(u.nominatedPods[nominatedNodeName], podKey) - } - } -} - -func (u *UnschedulablePodsMap) deleteFromNominated(pod *v1.Pod) { - nominatedNodeName := NominatedNodeName(pod) - if len(nominatedNodeName) > 0 { - podKey := u.keyFunc(pod) - nps := u.nominatedPods[nominatedNodeName] - for i, np := range nps { - if np == podKey { - u.nominatedPods[nominatedNodeName] = append(nps[:i], nps[i+1:]...) - if len(u.nominatedPods[nominatedNodeName]) == 0 { - delete(u.nominatedPods, nominatedNodeName) - } - break - } - } } } // Delete deletes a pod from the unschedulable pods. func (u *UnschedulablePodsMap) Delete(pod *v1.Pod) { podKey := u.keyFunc(pod) - if p, exists := u.pods[podKey]; exists { - u.deleteFromNominated(p) + if _, exists := u.pods[podKey]; exists { delete(u.pods, podKey) } } @@ -472,20 +497,12 @@ func (u *UnschedulablePodsMap) Delete(pod *v1.Pod) { // Update updates a pod in the unschedulable pods. 
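// If no entry with the same key exists yet, the pod is added instead.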
func (u *UnschedulablePodsMap) Update(pod *v1.Pod) {
 	podKey := u.keyFunc(pod)
-	oldPod, exists := u.pods[podKey]
+	_, exists := u.pods[podKey]
 	if !exists {
 		u.Add(pod)
 		return
 	}
 	u.pods[podKey] = pod
-	oldNominateNodeName := NominatedNodeName(oldPod)
-	nominatedNodeName := NominatedNodeName(pod)
-	if oldNominateNodeName != nominatedNodeName {
-		u.deleteFromNominated(oldPod)
-		if len(nominatedNodeName) > 0 {
-			u.nominatedPods[nominatedNodeName] = append(u.nominatedPods[nominatedNodeName], podKey)
-		}
-	}
 }

 // Get returns the pod if a pod with the same key as the key of the given "pod"
@@ -498,28 +515,16 @@ func (u *UnschedulablePodsMap) Get(pod *v1.Pod) *v1.Pod {
 	return nil
 }

-// GetPodsWaitingForNode returns a list of unschedulable pods whose NominatedNodeNames
-// are equal to the given nodeName.
-func (u *UnschedulablePodsMap) GetPodsWaitingForNode(nodeName string) []*v1.Pod {
-	var pods []*v1.Pod
-	for _, key := range u.nominatedPods[nodeName] {
-		pods = append(pods, u.pods[key])
-	}
-	return pods
-}
-
 // Clear removes all the entries from the unschedulable maps.
 func (u *UnschedulablePodsMap) Clear() {
 	u.pods = make(map[string]*v1.Pod)
-	u.nominatedPods = make(map[string][]string)
 }

 // newUnschedulablePodsMap initializes a new object of UnschedulablePodsMap.
 func newUnschedulablePodsMap() *UnschedulablePodsMap {
 	return &UnschedulablePodsMap{
-		pods:          make(map[string]*v1.Pod),
-		nominatedPods: make(map[string][]string),
-		keyFunc:       util.GetPodFullName,
+		pods:    make(map[string]*v1.Pod),
+		keyFunc: util.GetPodFullName,
 	}
 }

diff --git a/pkg/scheduler/core/scheduling_queue_test.go b/pkg/scheduler/core/scheduling_queue_test.go
index d9065761357..bae5a606eac 100644
--- a/pkg/scheduler/core/scheduling_queue_test.go
+++ b/pkg/scheduler/core/scheduling_queue_test.go
@@ -27,7 +27,7 @@ import (
 )

 var mediumPriority = (lowPriority + highPriority) / 2
-var highPriorityPod, medPriorityPod, unschedulablePod = v1.Pod{
+var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
 	ObjectMeta: metav1.ObjectMeta{
 		Name:      "hpp",
 		Namespace: "ns1",
 	},
 	Spec: v1.PodSpec{
 		Priority: &highPriority,
 	},
 },
+	v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "hpp",
+			Namespace: "ns1",
+		},
+		Spec: v1.PodSpec{
+			Priority: &highPriority,
+		},
+		Status: v1.PodStatus{
+			NominatedNodeName: "node1",
+		},
+	},
 	v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "mpp",
@@ -79,6 +91,12 @@ func TestPriorityQueue_Add(t *testing.T) {
 	q.Add(&medPriorityPod)
 	q.Add(&unschedulablePod)
 	q.Add(&highPriorityPod)
+	expectedNominatedPods := map[string][]*v1.Pod{
+		"node1": {&medPriorityPod, &unschedulablePod},
+	}
+	if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
+		t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
+	}
 	if p, err := q.Pop(); err != nil || p != &highPriorityPod {
 		t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
 	}
@@ -88,6 +106,61 @@ func TestPriorityQueue_Add(t *testing.T) {
 	if p, err := q.Pop(); err != nil || p != &unschedulablePod {
 		t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
 	}
+	if len(q.nominatedPods) != 0 {
+		t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
+	}
+}
+
+func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
+	q := NewPriorityQueue()
+	q.unschedulableQ.Add(&highPriNominatedPod)
+	q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything.
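+	// The pod is already tracked in unschedulableQ, so the call above is a no-op.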
+	q.AddIfNotPresent(&medPriorityPod)
+	q.AddIfNotPresent(&unschedulablePod)
+	expectedNominatedPods := map[string][]*v1.Pod{
+		"node1": {&medPriorityPod, &unschedulablePod},
+	}
+	if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
+		t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
+	}
+	if p, err := q.Pop(); err != nil || p != &medPriorityPod {
+		t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
+	}
+	if p, err := q.Pop(); err != nil || p != &unschedulablePod {
+		t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
+	}
+	if len(q.nominatedPods) != 0 {
+		t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
+	}
+	if q.unschedulableQ.Get(&highPriNominatedPod) != &highPriNominatedPod {
+		t.Errorf("Pod %v was not found in the unschedulableQ.", highPriNominatedPod.Name)
+	}
+}
+
+func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
+	q := NewPriorityQueue()
+	q.Add(&highPriNominatedPod)
+	q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything.
+	q.AddUnschedulableIfNotPresent(&medPriorityPod)      // This should go to activeQ.
+	q.AddUnschedulableIfNotPresent(&unschedulablePod)
+	expectedNominatedPods := map[string][]*v1.Pod{
+		"node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
+	}
+	if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
+		t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
+	}
+	if p, err := q.Pop(); err != nil || p != &highPriNominatedPod {
+		t.Errorf("Expected: %v after Pop, but got: %v", highPriNominatedPod.Name, p.Name)
+	}
+	if p, err := q.Pop(); err != nil || p != &medPriorityPod {
+		t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
+	}
+	if len(q.nominatedPods) != 1 {
+		t.Errorf("Expected nominatedPods to have one element: %v", q.nominatedPods)
+	}
+	if q.unschedulableQ.Get(&unschedulablePod) != &unschedulablePod {
+		t.Errorf("Pod %v was not found in the unschedulableQ.", unschedulablePod.Name)
+	}
 }

 func TestPriorityQueue_Pop(t *testing.T) {
@@ -96,55 +169,71 @@ func TestPriorityQueue_Pop(t *testing.T) {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		if p, err := q.Pop(); err != nil || p != &highPriorityPod {
-			t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
+		if p, err := q.Pop(); err != nil || p != &medPriorityPod {
+			t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
+		}
+		if len(q.nominatedPods) != 0 {
+			t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
 		}
 	}()
-	q.Add(&highPriorityPod)
+	q.Add(&medPriorityPod)
 	wg.Wait()
 }

 func TestPriorityQueue_Update(t *testing.T) {
 	q := NewPriorityQueue()
-	q.Update(&highPriorityPod)
+	q.Update(nil, &highPriorityPod)
 	if _, exists, _ := q.activeQ.Get(&highPriorityPod); !exists {
 		t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
 	}
-	q.Update(&highPriorityPod)
+	if len(q.nominatedPods) != 0 {
+		t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
+	}
+	// Update highPriorityPod and add a nominatedNodeName to it.
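+	// The pod keeps the same name and namespace, so activeQ updates it in place
+	// and nominatedPods gains a single entry under "node1".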
+	q.Update(&highPriorityPod, &highPriNominatedPod)
 	if q.activeQ.data.Len() != 1 {
 		t.Error("Expected only one item in activeQ.")
 	}
+	if len(q.nominatedPods) != 1 {
+		t.Errorf("Expected one item in nominatedPods map: %v", q.nominatedPods)
+	}
 	// Updating an unschedulable pod which is not in any of the two queues, should
 	// add the pod to activeQ.
-	q.Update(&unschedulablePod)
+	q.Update(&unschedulablePod, &unschedulablePod)
 	if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
 		t.Errorf("Expected %v to be added to activeQ.", unschedulablePod.Name)
 	}
-	// Updating a pod that is already in unschedulableQ, should move the pod to
-	// activeQ.
-	q.Update(&unschedulablePod)
+	// Updating a pod that is already in activeQ should not change it.
+	q.Update(&unschedulablePod, &unschedulablePod)
 	if len(q.unschedulableQ.pods) != 0 {
 		t.Error("Expected unschedulableQ to be empty.")
 	}
 	if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
 		t.Errorf("Expected: %v to be added to activeQ.", unschedulablePod.Name)
 	}
-	if p, err := q.Pop(); err != nil || p != &highPriorityPod {
+	if p, err := q.Pop(); err != nil || p != &highPriNominatedPod {
 		t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
 	}
 }

 func TestPriorityQueue_Delete(t *testing.T) {
 	q := NewPriorityQueue()
-	q.Update(&highPriorityPod)
+	q.Update(&highPriorityPod, &highPriNominatedPod)
 	q.Add(&unschedulablePod)
-	q.Delete(&highPriorityPod)
+	q.Delete(&highPriNominatedPod)
 	if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
 		t.Errorf("Expected %v to be in activeQ.", unschedulablePod.Name)
 	}
-	if _, exists, _ := q.activeQ.Get(&highPriorityPod); exists {
+	if _, exists, _ := q.activeQ.Get(&highPriNominatedPod); exists {
 		t.Errorf("Didn't expect %v to be in activeQ.", highPriorityPod.Name)
 	}
+	if len(q.nominatedPods) != 1 {
+		t.Errorf("Expected nominatedPods to have only 'unschedulablePod': %v", q.nominatedPods)
+	}
+	q.Delete(&unschedulablePod)
+	if len(q.nominatedPods) != 0 {
+		t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
+	}
 }

 func TestPriorityQueue_MoveAllToActiveQueue(t *testing.T) {
@@ -214,6 +303,23 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 	}
 }

+func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
+	q := NewPriorityQueue()
+	q.Add(&medPriorityPod)
+	q.Add(&unschedulablePod)
+	q.Add(&highPriorityPod)
+	if p, err := q.Pop(); err != nil || p != &highPriorityPod {
+		t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
+	}
+	expectedList := []*v1.Pod{&medPriorityPod, &unschedulablePod}
+	if !reflect.DeepEqual(expectedList, q.WaitingPodsForNode("node1")) {
+		t.Error("Unexpected list of nominated Pods for node.")
+	}
+	if q.WaitingPodsForNode("node2") != nil {
+		t.Error("Expected list of nominated Pods for node2 to be empty.")
+	}
+}
+
 func TestUnschedulablePodsMap(t *testing.T) {
 	var pods = []*v1.Pod{
 		{
@@ -261,22 +367,16 @@ func TestUnschedulablePodsMap(t *testing.T) {
 	}
 	var updatedPods = make([]*v1.Pod, len(pods))
 	updatedPods[0] = pods[0].DeepCopy()
-	updatedPods[0].Status.NominatedNodeName = "node3"
 	updatedPods[1] = pods[1].DeepCopy()
-	updatedPods[1].Status.NominatedNodeName = "node3"
 	updatedPods[3] = pods[3].DeepCopy()
-	updatedPods[3].Status.NominatedNodeName = ""

 	tests := []struct {
-		podsToAdd                    []*v1.Pod
-		expectedMapAfterAdd          map[string]*v1.Pod
-		expectedNominatedAfterAdd    map[string][]string
-		podsToUpdate                 []*v1.Pod
-		expectedMapAfterUpdate       map[string]*v1.Pod
-		expectedNominatedAfterUpdate map[string][]string
-		podsToDelete
[]*v1.Pod - expectedMapAfterDelete map[string]*v1.Pod - expectedNominatedAfterDelete map[string][]string + podsToAdd []*v1.Pod + expectedMapAfterAdd map[string]*v1.Pod + podsToUpdate []*v1.Pod + expectedMapAfterUpdate map[string]*v1.Pod + podsToDelete []*v1.Pod + expectedMapAfterDelete map[string]*v1.Pod }{ { podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]}, @@ -286,10 +386,6 @@ func TestUnschedulablePodsMap(t *testing.T) { util.GetPodFullName(pods[2]): pods[2], util.GetPodFullName(pods[3]): pods[3], }, - expectedNominatedAfterAdd: map[string][]string{ - "node1": {util.GetPodFullName(pods[0]), util.GetPodFullName(pods[3])}, - "node3": {util.GetPodFullName(pods[2])}, - }, podsToUpdate: []*v1.Pod{updatedPods[0]}, expectedMapAfterUpdate: map[string]*v1.Pod{ util.GetPodFullName(pods[0]): updatedPods[0], @@ -297,19 +393,11 @@ func TestUnschedulablePodsMap(t *testing.T) { util.GetPodFullName(pods[2]): pods[2], util.GetPodFullName(pods[3]): pods[3], }, - expectedNominatedAfterUpdate: map[string][]string{ - "node1": {util.GetPodFullName(pods[3])}, - "node3": {util.GetPodFullName(pods[2]), util.GetPodFullName(pods[0])}, - }, podsToDelete: []*v1.Pod{pods[0], pods[1]}, expectedMapAfterDelete: map[string]*v1.Pod{ util.GetPodFullName(pods[2]): pods[2], util.GetPodFullName(pods[3]): pods[3], }, - expectedNominatedAfterDelete: map[string][]string{ - "node1": {util.GetPodFullName(pods[3])}, - "node3": {util.GetPodFullName(pods[2])}, - }, }, { podsToAdd: []*v1.Pod{pods[0], pods[3]}, @@ -317,20 +405,13 @@ func TestUnschedulablePodsMap(t *testing.T) { util.GetPodFullName(pods[0]): pods[0], util.GetPodFullName(pods[3]): pods[3], }, - expectedNominatedAfterAdd: map[string][]string{ - "node1": {util.GetPodFullName(pods[0]), util.GetPodFullName(pods[3])}, - }, podsToUpdate: []*v1.Pod{updatedPods[3]}, expectedMapAfterUpdate: map[string]*v1.Pod{ util.GetPodFullName(pods[0]): pods[0], util.GetPodFullName(pods[3]): updatedPods[3], }, - expectedNominatedAfterUpdate: map[string][]string{ - "node1": {util.GetPodFullName(pods[0])}, - }, - podsToDelete: []*v1.Pod{pods[0], pods[3]}, - expectedMapAfterDelete: map[string]*v1.Pod{}, - expectedNominatedAfterDelete: map[string][]string{}, + podsToDelete: []*v1.Pod{pods[0], pods[3]}, + expectedMapAfterDelete: map[string]*v1.Pod{}, }, { podsToAdd: []*v1.Pod{pods[1], pods[2]}, @@ -338,24 +419,15 @@ func TestUnschedulablePodsMap(t *testing.T) { util.GetPodFullName(pods[1]): pods[1], util.GetPodFullName(pods[2]): pods[2], }, - expectedNominatedAfterAdd: map[string][]string{ - "node3": {util.GetPodFullName(pods[2])}, - }, podsToUpdate: []*v1.Pod{updatedPods[1]}, expectedMapAfterUpdate: map[string]*v1.Pod{ util.GetPodFullName(pods[1]): updatedPods[1], util.GetPodFullName(pods[2]): pods[2], }, - expectedNominatedAfterUpdate: map[string][]string{ - "node3": {util.GetPodFullName(pods[2]), util.GetPodFullName(updatedPods[1])}, - }, podsToDelete: []*v1.Pod{pods[2], pods[3]}, expectedMapAfterDelete: map[string]*v1.Pod{ util.GetPodFullName(pods[1]): updatedPods[1], }, - expectedNominatedAfterDelete: map[string][]string{ - "node3": {util.GetPodFullName(updatedPods[1])}, - }, }, } @@ -368,10 +440,7 @@ func TestUnschedulablePodsMap(t *testing.T) { t.Errorf("#%d: Unexpected map after adding pods. Expected: %v, got: %v", i, test.expectedMapAfterAdd, upm.pods) } - if !reflect.DeepEqual(upm.nominatedPods, test.expectedNominatedAfterAdd) { - t.Errorf("#%d: Unexpected nominated map after adding pods. 
Expected: %v, got: %v", - i, test.expectedNominatedAfterAdd, upm.nominatedPods) - } + if len(test.podsToUpdate) > 0 { for _, p := range test.podsToUpdate { upm.Update(p) @@ -380,10 +449,6 @@ func TestUnschedulablePodsMap(t *testing.T) { t.Errorf("#%d: Unexpected map after updating pods. Expected: %v, got: %v", i, test.expectedMapAfterUpdate, upm.pods) } - if !reflect.DeepEqual(upm.nominatedPods, test.expectedNominatedAfterUpdate) { - t.Errorf("#%d: Unexpected nominated map after updating pods. Expected: %v, got: %v", - i, test.expectedNominatedAfterUpdate, upm.nominatedPods) - } } for _, p := range test.podsToDelete { upm.Delete(p) @@ -392,10 +457,6 @@ func TestUnschedulablePodsMap(t *testing.T) { t.Errorf("#%d: Unexpected map after deleting pods. Expected: %v, got: %v", i, test.expectedMapAfterDelete, upm.pods) } - if !reflect.DeepEqual(upm.nominatedPods, test.expectedNominatedAfterDelete) { - t.Errorf("#%d: Unexpected nominated map after deleting pods. Expected: %v, got: %v", - i, test.expectedNominatedAfterDelete, upm.nominatedPods) - } upm.Clear() if len(upm.pods) != 0 { t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods)) diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 6c9636572b5..2f1115f741e 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -591,7 +591,7 @@ func (c *configFactory) updatePodInSchedulingQueue(oldObj, newObj interface{}) { if c.skipPodUpdate(pod) { return } - if err := c.podQueue.Update(pod); err != nil { + if err := c.podQueue.Update(oldObj.(*v1.Pod), pod); err != nil { runtime.HandleError(fmt.Errorf("unable to update %T: %v", newObj, err)) } } From 7d127962976f52637428af02c340a7eb5e02d852 Mon Sep 17 00:00:00 2001 From: huangjiuyuan Date: Thu, 1 Feb 2018 23:16:17 +0800 Subject: [PATCH 25/53] Add tests for schedulercache --- pkg/scheduler/schedulercache/BUILD | 5 +- .../schedulercache/node_info_test.go | 813 ++++++++++++++++++ 2 files changed, 817 insertions(+), 1 deletion(-) create mode 100644 pkg/scheduler/schedulercache/node_info_test.go diff --git a/pkg/scheduler/schedulercache/BUILD b/pkg/scheduler/schedulercache/BUILD index 19d35198f16..b45ade23233 100644 --- a/pkg/scheduler/schedulercache/BUILD +++ b/pkg/scheduler/schedulercache/BUILD @@ -26,7 +26,10 @@ go_library( go_test( name = "go_default_test", - srcs = ["cache_test.go"], + srcs = [ + "cache_test.go", + "node_info_test.go", + ], embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/scheduler/schedulercache", deps = [ diff --git a/pkg/scheduler/schedulercache/node_info_test.go b/pkg/scheduler/schedulercache/node_info_test.go new file mode 100644 index 00000000000..afe40fad207 --- /dev/null +++ b/pkg/scheduler/schedulercache/node_info_test.go @@ -0,0 +1,813 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schedulercache + +import ( + "fmt" + "reflect" + "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/scheduler/util" +) + +func TestNewResource(t *testing.T) { + tests := []struct { + resourceList v1.ResourceList + expected *Resource + }{ + { + resourceList: map[v1.ResourceName]resource.Quantity{}, + expected: &Resource{}, + }, + { + resourceList: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceMemory: *resource.NewQuantity(2000, resource.BinarySI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(1000, resource.DecimalSI), + v1.ResourcePods: *resource.NewQuantity(80, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), + "scalar.test/" + "scalar1": *resource.NewQuantity(1, resource.DecimalSI), + v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(2, resource.BinarySI), + }, + expected: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + }, + } + + for _, test := range tests { + r := NewResource(test.resourceList) + if !reflect.DeepEqual(test.expected, r) { + t.Errorf("expected: %#v, got: %#v", test.expected, r) + } + } +} + +func TestResourceList(t *testing.T) { + tests := []struct { + resource *Resource + expected v1.ResourceList + }{ + { + resource: &Resource{}, + expected: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(0, -3), + v1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourcePods: *resource.NewQuantity(0, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(0, resource.BinarySI), + }, + }, + { + resource: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + expected: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceMemory: *resource.NewQuantity(2000, resource.BinarySI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(1000, resource.DecimalSI), + v1.ResourcePods: *resource.NewQuantity(80, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), + "scalar.test/" + "scalar1": *resource.NewQuantity(1, resource.DecimalSI), + v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(2, resource.BinarySI), + }, + }, + } + + for _, test := range tests { + rl := test.resource.ResourceList() + if !reflect.DeepEqual(test.expected, rl) { + t.Errorf("expected: %#v, got: %#v", test.expected, rl) + } + } +} + +func TestResourceClone(t *testing.T) { + tests := []struct { + resource *Resource + expected *Resource + }{ + { + resource: &Resource{}, + expected: &Resource{}, + }, + { + resource: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + expected: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + }, + } + 
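+	// Each case clones the source and then mutates it below; a correct deep
+	// copy (including the ScalarResources map) must leave the clone untouched.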
+ for _, test := range tests { + r := test.resource.Clone() + // Modify the field to check if the result is a clone of the origin one. + test.resource.MilliCPU += 1000 + if !reflect.DeepEqual(test.expected, r) { + t.Errorf("expected: %#v, got: %#v", test.expected, r) + } + } +} + +func TestResourceAddScalar(t *testing.T) { + tests := []struct { + resource *Resource + scalarName v1.ResourceName + scalarQuantity int64 + expected *Resource + }{ + { + resource: &Resource{}, + scalarName: "scalar1", + scalarQuantity: 100, + expected: &Resource{ + ScalarResources: map[v1.ResourceName]int64{"scalar1": 100}, + }, + }, + { + resource: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"hugepages-test": 2}, + }, + scalarName: "scalar2", + scalarQuantity: 200, + expected: &Resource{ + MilliCPU: 4, + Memory: 2000, + NvidiaGPU: 1000, + EphemeralStorage: 5000, + AllowedPodNumber: 80, + ScalarResources: map[v1.ResourceName]int64{"hugepages-test": 2, "scalar2": 200}, + }, + }, + } + + for _, test := range tests { + test.resource.AddScalar(test.scalarName, test.scalarQuantity) + if !reflect.DeepEqual(test.expected, test.resource) { + t.Errorf("expected: %#v, got: %#v", test.expected, test.resource) + } + } +} + +func TestNewNodeInfo(t *testing.T) { + nodeName := "test-node" + pods := []*v1.Pod{ + makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), + } + + expected := &NodeInfo{ + requestedResource: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + nonzeroRequest: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + allocatableResource: &Resource{}, + generation: 2, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 80}: {}, + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + } + + ni := NewNodeInfo(pods...) 
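+	// The two base pods request 100m+200m of CPU and 500+1Ki (1524) bytes of
+	// memory, so the aggregated resources, both host ports, and a generation
+	// of 2 (one bump per added pod) are expected here.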
+ if !reflect.DeepEqual(expected, ni) { + t.Errorf("expected: %#v, got: %#v", expected, ni) + } +} + +func TestNodeInfoClone(t *testing.T) { + nodeName := "test-node" + tests := []struct { + nodeInfo *NodeInfo + expected *NodeInfo + }{ + { + nodeInfo: &NodeInfo{ + requestedResource: &Resource{}, + nonzeroRequest: &Resource{}, + allocatableResource: &Resource{}, + generation: 2, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 80}: {}, + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + }, + expected: &NodeInfo{ + requestedResource: &Resource{}, + nonzeroRequest: &Resource{}, + allocatableResource: &Resource{}, + generation: 2, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 80}: {}, + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + ni := test.nodeInfo.Clone() + // Modify the field to check if the result is a clone of the origin one. 
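+		// generation is a plain counter, so the clone must not observe this write.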
+ test.nodeInfo.generation += 10 + if !reflect.DeepEqual(test.expected, ni) { + t.Errorf("expected: %#v, got: %#v", test.expected, ni) + } + } +} + +func TestNodeInfoAddPod(t *testing.T) { + nodeName := "test-node" + pods := []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + } + expected := &NodeInfo{ + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + }, + requestedResource: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + nonzeroRequest: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + allocatableResource: &Resource{}, + generation: 2, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 80}: {}, + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + } + + ni := fakeNodeInfo() + for _, pod := range pods { + ni.AddPod(pod) + } + + if !reflect.DeepEqual(expected, ni) { + t.Errorf("expected: %#v, got: %#v", expected, ni) + } +} + +func TestNodeInfoRemovePod(t *testing.T) { + nodeName := "test-node" + pods := []*v1.Pod{ + makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), + } + + tests := []struct { + pod *v1.Pod + errExpected bool + expectedNodeInfo *NodeInfo + }{ + { + pod: makeBasePod(t, nodeName, "non-exist", "0", "0", "", []v1.ContainerPort{{}}), + errExpected: true, + 
expectedNodeInfo: &NodeInfo{ + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + }, + requestedResource: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + nonzeroRequest: &Resource{ + MilliCPU: 300, + Memory: 1524, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + allocatableResource: &Resource{}, + generation: 2, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 80}: {}, + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + }, + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("500"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + errExpected: false, + expectedNodeInfo: &NodeInfo{ + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + }, + requestedResource: &Resource{ + MilliCPU: 200, + Memory: 1024, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + nonzeroRequest: &Resource{ + MilliCPU: 200, + Memory: 1024, + NvidiaGPU: 0, + EphemeralStorage: 0, + AllowedPodNumber: 0, + ScalarResources: map[v1.ResourceName]int64(nil), + }, + allocatableResource: &Resource{}, + generation: 3, + usedPorts: util.HostPortInfo{ + "127.0.0.1": map[util.ProtocolPort]struct{}{ + {Protocol: "TCP", Port: 8080}: {}, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "node_info_cache_test", + Name: "test-2", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Ki"), + }, + }, + Ports: []v1.ContainerPort{ + { + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }, + }, + }, + }, + NodeName: nodeName, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + ni := fakeNodeInfo(pods...) 
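+		// Each case starts from a fresh NodeInfo seeded with both base pods.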
+
+		err := ni.RemovePod(test.pod)
+		if err != nil {
+			if test.errExpected {
+				expectedErrorMsg := fmt.Errorf("no corresponding pod %s in pods of node %s", test.pod.Name, ni.node.Name)
+				if expectedErrorMsg.Error() != err.Error() {
+					t.Errorf("expected error: %v, got: %v", expectedErrorMsg, err)
+				}
+			} else {
+				t.Errorf("expected no error, got: %v", err)
+			}
+		}
+
+		if !reflect.DeepEqual(test.expectedNodeInfo, ni) {
+			t.Errorf("expected: %#v, got: %#v", test.expectedNodeInfo, ni)
+		}
+	}
+}
+
+func fakeNodeInfo(pods ...*v1.Pod) *NodeInfo {
+	ni := NewNodeInfo(pods...)
+	ni.node = &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-node",
+		},
+	}
+	return ni
+}
From 7d6b721044d13d5712275fff75e87fc200686f10 Mon Sep 17 00:00:00 2001
From: hangaoshuai
Date: Sun, 11 Feb 2018 11:42:31 +0800
Subject: [PATCH 26/53] remove unused functions printIndentedJson and
 printAllPods in test/integration/scheduler
---
 .../scheduler/local-pv-neg-affinity_test.go | 11 -----------
 test/integration/scheduler/util.go          | 12 ------------
 2 files changed, 23 deletions(-)

diff --git a/test/integration/scheduler/local-pv-neg-affinity_test.go b/test/integration/scheduler/local-pv-neg-affinity_test.go
index 7cbf7742539..7b454d6de86 100644
--- a/test/integration/scheduler/local-pv-neg-affinity_test.go
+++ b/test/integration/scheduler/local-pv-neg-affinity_test.go
@@ -19,7 +19,6 @@ package scheduler
 // This file tests the VolumeScheduling feature.

 import (
-	"encoding/json"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -311,13 +310,3 @@ func markNodeSelector(pod *v1.Pod, node string) {
 	}
 	pod.Spec.NodeSelector = ns
 }
-
-func printIndentedJson(data interface{}) string {
-	var indentedJSON []byte
-
-	indentedJSON, err := json.MarshalIndent(data, "", "\t")
-	if err != nil {
-		return fmt.Sprintf("JSON parse error: %v", err)
-	}
-	return string(indentedJSON)
-}
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go
index 1ccd34826ea..f3d63843086 100644
--- a/test/integration/scheduler/util.go
+++ b/test/integration/scheduler/util.go
@@ -381,15 +381,3 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 		}
 	}
 }
-
-// printAllPods prints a list of all the pods and their node names. This is used
-// for debugging. 
-func printAllPods(t *testing.T, cs clientset.Interface, nsName string) {
-	podList, err := cs.CoreV1().Pods(nsName).List(metav1.ListOptions{})
-	if err != nil {
-		t.Logf("Error getting pods: %v", err)
-	}
-	for _, pod := range podList.Items {
-		t.Logf("Pod:\n\tName:%v\n\tNamespace:%v\n\tNode Name:%v\n", pod.Name, pod.Namespace, pod.Spec.NodeName)
-	}
-}
From 15530c09146c0f09ec5e383949cec2c3c4ef6707 Mon Sep 17 00:00:00 2001
From: Mike Wilson
Date: Sun, 11 Feb 2018 14:25:45 -0500
Subject: [PATCH 27/53] Requesting new credentials when node names change
---
 .../kubernetes-worker/reactive/kubernetes_worker.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
index 1fb25eac48d..6c6ee98dae8 100644
--- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
+++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
@@ -927,6 +927,15 @@ def notify_master_gpu_not_enabled(kube_control):
     kube_control.set_gpu(False)


+@when('kube-control.connected')
+@when('config.changed.kubelet-extra-args')
+def maybe_request_new_credentials(kube_control):
+    kubelet_extra_args = parse_extra_args('kubelet-extra-args')
+    cloud_provider = kubelet_extra_args.get('cloud-provider', '')
+    if data_changed('cloud_provider', cloud_provider):
+        request_kubelet_and_proxy_credentials(kube_control)
+
+
 @when('kube-control.connected')
 def request_kubelet_and_proxy_credentials(kube_control):
     """ Request kubelet node authorization with a well formed kubelet user.
@@ -935,14 +944,14 @@
     # The kube-control interface is created to support RBAC.
     # At this point we might as well do the right thing and return the hostname
     # even if it will only be used when we enable RBAC
-    nodeuser = 'system:node:{}'.format(gethostname().lower())
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
     kube_control.set_auth_request(nodeuser)


 @when('kube-control.connected')
 def catch_change_in_creds(kube_control):
     """Request a service restart in case credential updates were detected."""
-    nodeuser = 'system:node:{}'.format(gethostname().lower())
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
     creds = kube_control.get_auth_credentials(nodeuser)
     if creds \
             and data_changed('kube-control.creds', creds) \
From b3fff711619bc3553fcd51620709f0896a18e4b9 Mon Sep 17 00:00:00 2001
From: mlmhl
Date: Mon, 12 Feb 2018 09:04:00 +0800
Subject: [PATCH 28/53] format some import statements in scheduler pkg
---
 pkg/scheduler/algorithm/predicates/predicates.go | 2 +-
 pkg/scheduler/core/generic_scheduler.go          | 2 +-
 pkg/scheduler/factory/plugins.go                 | 2 +-
 pkg/scheduler/scheduler.go                       | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index e4fbbcfdf89..d99475ab6be 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -41,10 +41,10 @@ import (
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
+	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

 const (

diff --git 
a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go
index c20c399d394..b6c889279fd 100644
--- a/pkg/scheduler/core/generic_scheduler.go
+++ b/pkg/scheduler/core/generic_scheduler.go
@@ -39,9 +39,9 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
 	"k8s.io/kubernetes/pkg/scheduler/util"
+	"k8s.io/kubernetes/pkg/scheduler/volumebinder"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

 // FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go
index e0ccdbda912..bd33dad00e9 100644
--- a/pkg/scheduler/factory/plugins.go
+++ b/pkg/scheduler/factory/plugins.go
@@ -28,9 +28,9 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	"k8s.io/kubernetes/pkg/scheduler/volumebinder"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

 // PluginFactoryArgs are passed to all plugin factory functions.
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 74590a08698..85b4458fd58 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -36,9 +36,9 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
 	"k8s.io/kubernetes/pkg/scheduler/util"
+	"k8s.io/kubernetes/pkg/scheduler/volumebinder"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

 // Binder knows how to write a binding.
From 8a7198b036fada779a3d8aa7b127c2a6504c50df Mon Sep 17 00:00:00 2001
From: andyzhangx
Date: Mon, 12 Feb 2018 03:22:57 +0000
Subject: [PATCH 29/53] use new account generation method for blob disk

fix comments

change azureDiskSharedAccountNamePrefix var

rename sharedDiskAccountNamePrefix

use default vhd container name as "vhds"

use one common func: SearchStorageAccount

fix comments
---
 .../azure/azure_blobDiskController.go       | 138 ++++--------------
 .../providers/azure/azure_storage.go        |  56 ++-----
 .../providers/azure/azure_storageaccount.go |  55 ++++++-
 3 files changed, 94 insertions(+), 155 deletions(-)

diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
index 9fefe7a4588..730b7b9a4fa 100644
--- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
+++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
@@ -22,10 +22,8 @@ import (
 	"fmt"
 	"net/url"
 	"regexp"
-	"sync"
-
-	"strconv"
 	"strings"
+	"sync"
 	"sync/atomic"
 	"time"

@@ -60,15 +58,11 @@ type BlobDiskController struct {
 }

 var (
-	defaultContainerName     = ""
-	storageAccountNamePrefix = ""
-	storageAccountNameMatch  = ""
-	accountsLock             = &sync.Mutex{}
+	accountsLock = &sync.Mutex{}
 )

 func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
 	c := BlobDiskController{common: common}
-	c.setUniqueStrings()

 	// get accounts
 	accounts, err := c.getAllStorageAccounts()
@@ -84,46 +78,26 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
 // CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
 // If no storage account is given, search all the storage accounts associated with the resource group and pick one that
 // fits storage type and location. 
-func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) { - var err error - accounts := []accountWithLocation{} - if len(storageAccount) > 0 { - accounts = append(accounts, accountWithLocation{Name: storageAccount}) - } else { - // find a storage account - accounts, err = c.common.cloud.getStorageAccounts(storageAccountType, location) - if err != nil { - // TODO: create a storage account and container - return "", "", 0, err - } +func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) { + account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix) + if err != nil { + return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err) } - for _, account := range accounts { - glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location) - if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 { - // find the access key with this account - key, err := c.common.cloud.getStorageAccesskey(account.Name) - if err != nil { - glog.V(2).Infof("no key found for storage account %s", account.Name) - continue - } - client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment) - if err != nil { - return "", "", 0, err - } - blobClient := client.GetBlobService() - - // create a page blob in this account's vhd container - diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB)) - if err != nil { - return "", "", 0, err - } - - glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) - return diskName, diskURI, requestGB, err - } + client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment) + if err != nil { + return "", "", 0, err } - return "", "", 0, fmt.Errorf("failed to find a matching storage account") + blobClient := client.GetBlobService() + + // create a page blob in this account's vhd container + diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB)) + if err != nil { + return "", "", 0, err + } + + glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + return diskName, diskURI, requestGB, err } // DeleteVolume deletes a VHD blob @@ -252,7 +226,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT return "", err } - _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB)) + _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB)) if err != nil { return "", err } @@ -281,9 +255,9 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { return err } - glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName) + glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName) - container := blobSvc.GetContainerReference(defaultContainerName) + container := blobSvc.GetContainerReference(vhdContainerName) blob := container.GetBlobReference(vhdName) _, err = 
blob.DeleteIfExists(nil) @@ -299,19 +273,6 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { return err } -//Sets unique strings to be used as accountnames && || blob containers names -func (c *BlobDiskController) setUniqueStrings() { - uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID - hash := MakeCRC32(uniqueString) - //used to generate a unique container name used by this cluster PVC - defaultContainerName = hash - - storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash) - // Used to filter relevant accounts (accounts used by shared PVC) - storageAccountNameMatch = storageAccountNamePrefix - // Used as a template to create new names for relevant accounts - storageAccountNamePrefix = storageAccountNamePrefix + "%s" -} func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) { if account, exists := c.accounts[SAName]; exists && account.key != "" { return c.accounts[SAName].key, nil @@ -426,13 +387,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e return err } - container := blobSvc.GetContainerReference(defaultContainerName) + container := blobSvc.GetContainerReference(vhdContainerName) bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) if err != nil { return err } if bCreated { - glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName) + glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName) } // flag so we no longer have to check on ARM @@ -459,7 +420,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { } params := azstorage.ListBlobsParameters{} - container := blobSvc.GetContainerReference(defaultContainerName) + container := blobSvc.GetContainerReference(vhdContainerName) response, err := container.ListBlobs(params) if err != nil { return 0, err @@ -481,13 +442,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount accounts := make(map[string]*storageAccountState) for _, v := range *accountListResult.Value { - if strings.Index(*v.Name, storageAccountNameMatch) != 0 { - continue - } if v.Name == nil || v.Sku == nil { glog.Info("azureDisk - accountListResult Name or Sku is nil") continue } + if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) { + continue + } glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) sastate := &storageAccountState{ @@ -555,7 +516,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam countAccounts := 0 // account of this type. 
for _, v := range c.accounts { // filter out any stand-alone disks/accounts - if strings.Index(v.name, storageAccountNameMatch) != 0 { + if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) { continue } @@ -587,7 +548,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // if we failed to find storageaccount if SAName == "" { glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") - SAName = getAccountNameForNum(c.getNextAccountNum()) + SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { return "", err @@ -603,7 +564,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // avg are not create and we should create more accounts if we can if aboveAvg && countAccounts < maxStorageAccounts { glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) - SAName = getAccountNameForNum(c.getNextAccountNum()) + SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { return "", err @@ -620,22 +581,6 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // we found a storage accounts && [ avg are ok || we reached max sa count ] return SAName, nil } -func (c *BlobDiskController) getNextAccountNum() int { - max := 0 - - for k := range c.accounts { - // filter out accounts that are for standalone - if strings.Index(k, storageAccountNameMatch) != 0 { - continue - } - num := getAccountNumFromName(k) - if num > max { - max = num - } - } - - return max + 1 -} //Gets storage account exist, provisionStatus, Error if any func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) { @@ -655,27 +600,6 @@ func (c *BlobDiskController) addAccountState(key string, state *storageAccountSt } } -// pads account num with zeros as needed -func getAccountNameForNum(num int) string { - sNum := strconv.Itoa(num) - missingZeros := 3 - len(sNum) - strZero := "" - for missingZeros > 0 { - strZero = strZero + "0" - missingZeros = missingZeros - 1 - } - - sNum = strZero + sNum - return fmt.Sprintf(storageAccountNamePrefix, sNum) -} - -func getAccountNumFromName(accountName string) int { - nameLen := len(accountName) - num, _ := strconv.Atoi(accountName[nameLen-3:]) - - return num -} - func createVHDHeader(size uint64) ([]byte, error) { h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{}) b := new(bytes.Buffer) diff --git a/pkg/cloudprovider/providers/azure/azure_storage.go b/pkg/cloudprovider/providers/azure/azure_storage.go index c05316313d9..03d919daa55 100644 --- a/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/pkg/cloudprovider/providers/azure/azure_storage.go @@ -20,66 +20,28 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" ) const ( - defaultStorageAccountType = string(storage.StandardLRS) - fileShareAccountNamePrefix = "f" + defaultStorageAccountType = string(storage.StandardLRS) + fileShareAccountNamePrefix = "f" + sharedDiskAccountNamePrefix = "ds" + dedicatedDiskAccountNamePrefix = "dd" ) // CreateFileShare creates a file share, using a matching storage 
account func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) { - if len(accountName) == 0 { - // find a storage account that matches accountType - accounts, err := az.getStorageAccounts(accountType, location) - if err != nil { - return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err) - } - - if len(accounts) > 0 { - accountName = accounts[0].Name - glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) - } - - if len(accountName) == 0 { - // not found a matching account, now create a new account in current resource group - accountName = generateStorageAccountName(fileShareAccountNamePrefix) - if location == "" { - location = az.Location - } - if accountType == "" { - accountType = defaultStorageAccountType - } - - glog.V(2).Infof("azureFile - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s", - accountName, az.ResourceGroup, location, accountType) - cp := storage.AccountCreateParameters{ - Sku: &storage.Sku{Name: storage.SkuName(accountType)}, - Tags: &map[string]*string{"created-by": to.StringPtr("azure-file")}, - Location: &location} - cancel := make(chan struct{}) - - _, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel) - err := <-errchan - if err != nil { - return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err)) - } - } - } - - // find the access key with this account - accountKey, err := az.getStorageAccesskey(accountName) + account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix) if err != nil { return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err) } - if err := az.createFileShare(accountName, accountKey, shareName, requestGiB); err != nil { - return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, accountName, err) + if err := az.createFileShare(account, key, shareName, requestGiB); err != nil { + return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err) } - glog.V(4).Infof("created share %s in account %s", shareName, accountName) - return accountName, accountKey, nil + glog.V(4).Infof("created share %s in account %s", shareName, account) + return account, key, nil } // DeleteFileShare deletes a file share using storage account name and key diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/pkg/cloudprovider/providers/azure/azure_storageaccount.go index 5666da1732c..8425463b563 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -19,13 +19,17 @@ package azure import ( "fmt" "strings" + + "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/glog" ) type accountWithLocation struct { Name, StorageType, Location string } -// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType +// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) { result, err := 
az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
 	if err != nil {
@@ -75,3 +79,52 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
 	}
 	return "", fmt.Errorf("no valid keys")
 }
+
+// ensureStorageAccount searches for a matching storage account, creates a new one
+// (named with genAccountNamePrefix) if none is found, and returns the account name and key.
+func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
+	if len(accountName) == 0 {
+		// find a storage account that matches accountType
+		accounts, err := az.getStorageAccounts(accountType, location)
+		if err != nil {
+			return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
+		}
+
+		if len(accounts) > 0 {
+			accountName = accounts[0].Name
+			glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
+		}
+
+		if len(accountName) == 0 {
+			// not found a matching account, now create a new account in current resource group
+			accountName = generateStorageAccountName(genAccountNamePrefix)
+			if location == "" {
+				location = az.Location
+			}
+			if accountType == "" {
+				accountType = defaultStorageAccountType
+			}
+
+			glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
+				accountName, az.ResourceGroup, location, accountType)
+			cp := storage.AccountCreateParameters{
+				Sku:      &storage.Sku{Name: storage.SkuName(accountType)},
+				Tags:     &map[string]*string{"created-by": to.StringPtr("azure")},
+				Location: &location}
+			cancel := make(chan struct{})
+
+			_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
+			err := <-errchan
+			if err != nil {
+				return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, err)
+			}
+		}
+	}
+
+	// find the access key with this account
+	accountKey, err := az.getStorageAccesskey(accountName)
+	if err != nil {
+		return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
+	}
+
+	return accountName, accountKey, nil
+}
From f8e206e80230b072235abcf43fcc2519b7644491 Mon Sep 17 00:00:00 2001
From: Jordan Liggitt
Date: Fri, 13 Oct 2017 12:49:21 -0400
Subject: [PATCH 30/53] Remove /ui/ redirect
---
 cmd/kube-apiserver/app/server.go              |  1 -
 hack/make-rules/test-cmd-util.sh              |  7 ++--
 pkg/master/master.go                          |  7 ----
 pkg/routes/BUILD                              |  6 +---
 pkg/routes/ui.go                              | 36 --------------------
 test/e2e/ui/dashboard.go                      | 11 ------
 test/integration/openshift/openshift_test.go |  1 -
 7 files changed, 4 insertions(+), 65 deletions(-)
 delete mode 100644 pkg/routes/ui.go

diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go
index 5a3a46afd54..8284f586935 100644
--- a/cmd/kube-apiserver/app/server.go
+++ b/cmd/kube-apiserver/app/server.go
@@ -336,7 +336,6 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele
 		EnableCoreControllers: true,
 		EventTTL:              s.EventTTL,
 		KubeletClientConfig:   s.KubeletConfig,
-		EnableUISupport:       true,
 		EnableLogsSupport:     s.EnableLogsHandler,
 		ProxyTransport:        proxyTransport,

diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh
index a24cf66a514..a4a9ed5c447 100755
--- a/hack/make-rules/test-cmd-util.sh
+++ b/hack/make-rules/test-cmd-util.sh
@@ -3503,10 +3503,8 @@ run_kubectl_local_proxy_tests() {

   kube::log::status "Testing kubectl local proxy"

-  # Make sure the UI can be 
proxied start-proxy - check-curl-proxy-code /ui 307 - check-curl-proxy-code /api/ui 404 + check-curl-proxy-code /api/kubernetes 404 check-curl-proxy-code /api/v1/namespaces 200 if kube::test::if_supports_resource "${metrics}" ; then check-curl-proxy-code /metrics 200 @@ -3524,7 +3522,8 @@ run_kubectl_local_proxy_tests() { # Custom paths let you see everything. start-proxy /custom - check-curl-proxy-code /custom/ui 307 + check-curl-proxy-code /custom/api/kubernetes 404 + check-curl-proxy-code /custom/api/v1/namespaces 200 if kube::test::if_supports_resource "${metrics}" ; then check-curl-proxy-code /custom/metrics 200 fi diff --git a/pkg/master/master.go b/pkg/master/master.go index eb8569d072d..d84dc5fa7ec 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -109,7 +109,6 @@ type ExtraConfig struct { // Used to start and monitor tunneling Tunneler tunneler.Tunneler - EnableUISupport bool EnableLogsSupport bool ProxyTransport http.RoundTripper @@ -269,9 +268,6 @@ func (cfg *Config) Complete(informers informers.SharedInformerFactory) Completed glog.Infof("Node port range unspecified. Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange) } - // enable swagger UI only if general UI support is on - c.GenericConfig.EnableSwaggerUI = c.GenericConfig.EnableSwaggerUI && c.ExtraConfig.EnableUISupport - if c.ExtraConfig.EndpointReconcilerConfig.Interval == 0 { c.ExtraConfig.EndpointReconcilerConfig.Interval = DefaultEndpointReconcilerInterval } @@ -304,9 +300,6 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) return nil, err } - if c.ExtraConfig.EnableUISupport { - routes.UIRedirect{}.Install(s.Handler.NonGoRestfulMux) - } if c.ExtraConfig.EnableLogsSupport { routes.Logs{}.Install(s.Handler.GoRestfulContainer) } diff --git a/pkg/routes/BUILD b/pkg/routes/BUILD index b62bff305db..7d7d3690250 100644 --- a/pkg/routes/BUILD +++ b/pkg/routes/BUILD @@ -10,13 +10,9 @@ go_library( srcs = [ "doc.go", "logs.go", - "ui.go", ], importpath = "k8s.io/kubernetes/pkg/routes", - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", - ], + deps = ["//vendor/github.com/emicklei/go-restful:go_default_library"], ) filegroup( diff --git a/pkg/routes/ui.go b/pkg/routes/ui.go deleted file mode 100644 index de6ca3c3abd..00000000000 --- a/pkg/routes/ui.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package routes - -import ( - "net/http" - - "k8s.io/apiserver/pkg/server/mux" -) - -const dashboardPath = "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/" - -// UIRedirect redirects /ui to the kube-ui proxy path. 
-type UIRedirect struct{}
-
-func (r UIRedirect) Install(c *mux.PathRecorderMux) {
-	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		http.Redirect(w, r, dashboardPath, http.StatusTemporaryRedirect)
-	})
-	c.Handle("/ui", handler)
-	c.HandlePrefix("/ui/", handler)
-}
diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go
index 2bc9415f093..7077f7e0148 100644
--- a/test/e2e/ui/dashboard.go
+++ b/test/e2e/ui/dashboard.go
@@ -86,16 +86,5 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 			return status == http.StatusOK, nil
 		})
 		Expect(err).NotTo(HaveOccurred())
-
-		By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
-		var status int
-		err = f.ClientSet.CoreV1().RESTClient().Get().
-			AbsPath(uiRedirect).
-			Timeout(framework.SingleCallTimeout).
-			Do().
-			StatusCode(&status).
-			Error()
-		Expect(err).NotTo(HaveOccurred())
-		Expect(status).To(Equal(http.StatusOK), "Unexpected status from /ui")
 	})
 })
diff --git a/test/integration/openshift/openshift_test.go b/test/integration/openshift/openshift_test.go
index 40b62455b72..fa42915ae75 100644
--- a/test/integration/openshift/openshift_test.go
+++ b/test/integration/openshift/openshift_test.go
@@ -32,7 +32,6 @@ func TestMasterExportsSymbols(t *testing.T) {
 		},
 		ExtraConfig: master.ExtraConfig{
 			EnableCoreControllers: false,
-			EnableUISupport:       false,
 			EnableLogsSupport:     false,
 		},
 	}
From a4e00ff3d8b704871e83bbd0c15dbb310810d06f Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Fri, 10 Nov 2017 10:34:14 +0000
Subject: [PATCH 31/53] kubeadm: add configuration option to not taint master

Although tainting the master is normally a good and proper thing to do,
in some situations (Docker for Mac in our case, but I suppose minikube
and such as well) a single-host configuration is desirable.

In linuxkit we have a
[workaround](https://github.com/linuxkit/linuxkit/blob/443e47c408cad0f1b29a457700d15b2c85ec407f/projects/kubernetes/kubernetes/kubeadm-init.sh#L19...L22)
to remove the taint after initialisation. With the change here we could
simply populate `/etc/kubeadm/kubeadm.yaml` with `noTaintMaster: true`
instead and have it never be tainted in the first place.

I have only added this to the config file and not to the CLI, since AIUI
the latter is somewhat deprecated.

The code also arranges to _remove_ an existing taint if it is unwanted.
I'm unsure if this behaviour is correct or desirable; I think a
reasonable argument could be made for leaving an existing taint in place
too.

Signed-off-by: Ian Campbell
---
 cmd/kubeadm/app/apis/kubeadm/types.go         |  4 +++
 .../app/apis/kubeadm/v1alpha1/types.go        |  4 +++
 .../v1alpha1/zz_generated.conversion.go       |  2 ++
 cmd/kubeadm/app/cmd/init.go                   |  2 +-
 cmd/kubeadm/app/cmd/phases/markmaster.go      |  2 +-
 .../app/phases/markmaster/markmaster.go       | 35 +++++++++++++++++----
 .../app/phases/markmaster/markmaster_test.go  | 21 ++++++++++-
 7 files changed, 61 insertions(+), 9 deletions(-)

diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go
index 7373a2ec0c5..2494fc2e20a 100644
--- a/cmd/kubeadm/app/apis/kubeadm/types.go
+++ b/cmd/kubeadm/app/apis/kubeadm/types.go
@@ -50,6 +50,10 @@ type MasterConfiguration struct {
 	// If not specified, defaults to Node and RBAC, meaning both the node
 	// authorizer and RBAC are enabled.
 	AuthorizationModes []string
+	// NoTaintMaster will, if set, suppress the tainting of the
+	// master node allowing workloads to be run on it (e.g. in
+	// single node configurations). 
+ NoTaintMaster bool // Mark the controller and api server pods as privileged as some cloud // controllers like openstack need escalated privileges under some conditions diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go index d1edd0e1e97..8bb8fad4411 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go @@ -50,6 +50,10 @@ type MasterConfiguration struct { // If not specified, defaults to Node and RBAC, meaning both the node // authorizer and RBAC are enabled. AuthorizationModes []string `json:"authorizationModes,omitempty"` + // NoTaintMaster will, if set, suppress the tainting of the + // master node allowing workloads to be run on it (e.g. in + // single node configurations). + NoTaintMaster bool `json:"noTaintMaster,omitempty"` // Mark the controller and api server pods as privileged as some cloud // controllers like openstack need escalated privileges under some conditions diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 8afb09456b2..96338f63bd8 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -202,6 +202,7 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in out.CloudProvider = in.CloudProvider out.NodeName = in.NodeName out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) + out.NoTaintMaster = in.NoTaintMaster out.PrivilegedPods = in.PrivilegedPods out.Token = in.Token out.TokenTTL = (*v1.Duration)(unsafe.Pointer(in.TokenTTL)) @@ -244,6 +245,7 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in out.CloudProvider = in.CloudProvider out.NodeName = in.NodeName out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) + out.NoTaintMaster = in.NoTaintMaster out.PrivilegedPods = in.PrivilegedPods out.Token = in.Token out.TokenTTL = (*v1.Duration)(unsafe.Pointer(in.TokenTTL)) diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 658d3237e57..1bfc9de0539 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -388,7 +388,7 @@ func (i *Init) Run(out io.Writer) error { } // PHASE 4: Mark the master with the right label/taint - if err := markmasterphase.MarkMaster(client, i.cfg.NodeName); err != nil { + if err := markmasterphase.MarkMaster(client, i.cfg.NodeName, !i.cfg.NoTaintMaster); err != nil { return fmt.Errorf("error marking master: %v", err) } diff --git a/cmd/kubeadm/app/cmd/phases/markmaster.go b/cmd/kubeadm/app/cmd/phases/markmaster.go index 3b9f4ad4388..7457834f293 100644 --- a/cmd/kubeadm/app/cmd/phases/markmaster.go +++ b/cmd/kubeadm/app/cmd/phases/markmaster.go @@ -75,7 +75,7 @@ func NewCmdMarkMaster() *cobra.Command { client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) kubeadmutil.CheckErr(err) - err = markmasterphase.MarkMaster(client, internalcfg.NodeName) + err = markmasterphase.MarkMaster(client, internalcfg.NodeName, !internalcfg.NoTaintMaster) kubeadmutil.CheckErr(err) }, } diff --git a/cmd/kubeadm/app/phases/markmaster/markmaster.go b/cmd/kubeadm/app/phases/markmaster/markmaster.go index 9917f08a92b..d3485470c54 100644 --- a/cmd/kubeadm/app/phases/markmaster/markmaster.go +++ b/cmd/kubeadm/app/phases/markmaster/markmaster.go @@ -32,9 +32,13 @@ import ( ) // MarkMaster taints the master and sets 
the master label
-func MarkMaster(client clientset.Interface, masterName string) error {
+func MarkMaster(client clientset.Interface, masterName string, taint bool) error {

-	fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
+	if taint {
+		fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
+	} else {
+		fmt.Printf("[markmaster] Will mark node %s as master by adding a label\n", masterName)
+	}

 	// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
 	return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) {
@@ -56,7 +60,7 @@ func MarkMaster(client clientset.Interface, masterName string) error {
 		}

 		// The master node should be tainted and labelled accordingly
-		markMasterNode(n)
+		markMasterNode(n, taint)

 		newData, err := json.Marshal(n)
 		if err != nil {
@@ -76,15 +80,23 @@ func MarkMaster(client clientset.Interface, masterName string) error {
 			return false, err
 		}

-		fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
+		if taint {
+			fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
+		} else {
+			fmt.Printf("[markmaster] Master %s labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
+		}

 		return true, nil
 	})
 }

-func markMasterNode(n *v1.Node) {
+func markMasterNode(n *v1.Node, taint bool) {
 	n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
-	addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
+	if taint {
+		addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
+	} else {
+		delTaintIfExists(n, kubeadmconstants.MasterTaint)
+	}
 }

 func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
@@ -96,3 +108,14 @@ func addTaintIfNotExists(n *v1.Node, t v1.Taint) {

 	n.Spec.Taints = append(n.Spec.Taints, t)
 }
+
+func delTaintIfExists(n *v1.Node, t v1.Taint) {
+	var taints []v1.Taint
+	for _, taint := range n.Spec.Taints {
+		if taint == t {
+			continue
+		}
+		taints = append(taints, taint)
+	}
+	n.Spec.Taints = taints
+}
diff --git a/cmd/kubeadm/app/phases/markmaster/markmaster_test.go b/cmd/kubeadm/app/phases/markmaster/markmaster_test.go
index 71380d7df76..d7d5a934501 100644
--- a/cmd/kubeadm/app/phases/markmaster/markmaster_test.go
+++ b/cmd/kubeadm/app/phases/markmaster/markmaster_test.go
@@ -43,32 +43,51 @@ func TestMarkMaster(t *testing.T) {
 		name          string
 		existingLabel string
 		existingTaint *v1.Taint
+		wantTaint     bool
 		expectedPatch string
 	}{
 		{
 			"master label and taint missing",
 			"",
 			nil,
+			true,
 			"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
 		},
+		{
+			"master label and taint missing but taint not wanted",
+			"",
+			nil,
+			false,
+			"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
+		},
 		{
 			"master label missing",
 			"",
 			&kubeadmconstants.MasterTaint,
+			true,
 			"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
 		},
 		{
 			"master taint missing",
 			kubeadmconstants.LabelNodeRoleMaster,
 			nil,
+			true,
 			"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
 		},
 		{
 			"nothing missing",
 			kubeadmconstants.LabelNodeRoleMaster,
 			&kubeadmconstants.MasterTaint,
+			true,
 			"{}",
 		},
+		{
+			"nothing missing but taint unwanted",
+			
kubeadmconstants.LabelNodeRoleMaster, + &kubeadmconstants.MasterTaint, + false, + "{\"spec\":{\"taints\":null}}", + }, } for _, tc := range tests { @@ -125,7 +144,7 @@ func TestMarkMaster(t *testing.T) { t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err) } - err = MarkMaster(cs, hostname) + err = MarkMaster(cs, hostname, tc.wantTaint) if err != nil { t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err) } From df5fc0941185167d4f86472491381de1cb4c4849 Mon Sep 17 00:00:00 2001 From: "Bobby (Babak) Salamat" Date: Mon, 12 Feb 2018 10:10:20 -0800 Subject: [PATCH 32/53] compare Pods by UID, not by name and namespace --- pkg/scheduler/core/scheduling_queue.go | 6 +++--- pkg/scheduler/core/scheduling_queue_test.go | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/scheduler/core/scheduling_queue.go b/pkg/scheduler/core/scheduling_queue.go index 21bbaf23af6..330aaa12c60 100644 --- a/pkg/scheduler/core/scheduling_queue.go +++ b/pkg/scheduler/core/scheduling_queue.go @@ -202,8 +202,8 @@ func NewPriorityQueue() *PriorityQueue { func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { nnn := NominatedNodeName(pod) if len(nnn) > 0 { - for _, p := range p.nominatedPods[nnn] { - if p.Name == pod.Name && p.Namespace == pod.Namespace { + for _, np := range p.nominatedPods[nnn] { + if np.UID == pod.UID { glog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) return } @@ -217,7 +217,7 @@ func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) { nnn := NominatedNodeName(pod) if len(nnn) > 0 { for i, np := range p.nominatedPods[nnn] { - if np.Name == pod.Name && np.Namespace == pod.Namespace { + if np.UID == pod.UID { p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...) if len(p.nominatedPods[nnn]) == 0 { delete(p.nominatedPods, nnn) diff --git a/pkg/scheduler/core/scheduling_queue_test.go b/pkg/scheduler/core/scheduling_queue_test.go index bae5a606eac..05c293fde03 100644 --- a/pkg/scheduler/core/scheduling_queue_test.go +++ b/pkg/scheduler/core/scheduling_queue_test.go @@ -31,6 +31,7 @@ var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1. ObjectMeta: metav1.ObjectMeta{ Name: "hpp", Namespace: "ns1", + UID: "hppns1", }, Spec: v1.PodSpec{ Priority: &highPriority, @@ -40,6 +41,7 @@ var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1. ObjectMeta: metav1.ObjectMeta{ Name: "hpp", Namespace: "ns1", + UID: "hppns1", }, Spec: v1.PodSpec{ Priority: &highPriority, @@ -52,6 +54,7 @@ var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1. ObjectMeta: metav1.ObjectMeta{ Name: "mpp", Namespace: "ns2", + UID: "mppns2", Annotations: map[string]string{ "annot2": "val2", }, @@ -67,6 +70,7 @@ var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1. 
ObjectMeta: metav1.ObjectMeta{ Name: "up", Namespace: "ns1", + UID: "upns1", Annotations: map[string]string{ "annot2": "val2", }, From 9ab9ddeb19ca6db1cad11904894a41c87ce76a1b Mon Sep 17 00:00:00 2001 From: Seth Jennings Date: Mon, 12 Feb 2018 14:24:07 -0600 Subject: [PATCH 33/53] kubelet: check for illegal phase transition --- pkg/kubelet/kubelet_pods.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 5fa3dfd535a..506109ede07 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1359,6 +1359,15 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po spec := &pod.Spec allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) s.Phase = getPhase(spec, allStatus) + // Check for illegal phase transition + if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { + // API server shows terminal phase; transitions are not allowed + if s.Phase != pod.Status.Phase { + glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s) + // Force back to phase from the API server + s.Phase = pod.Status.Phase + } + } kl.probeManager.UpdatePodStatus(pod.UID, s) s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) From 21dbbe14f28038577f4f8bfa96b74c061a72dfd7 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Fri, 9 Feb 2018 14:51:52 -0800 Subject: [PATCH 34/53] Ignore 0% and 100% eviction thresholds Primarily, this gives a way to explicitly disable eviction, which is necessary to use omitempty on EvictionHard. See: https://github.com/kubernetes/kubernetes/pull/53833#discussion_r166672137 As justification for this approach, neither 0% nor 100% make sense as eviction thresholds; in the "less-than" case, you can't have less than 0% of a resource and 100% perpetually evicts; in the "greater-than" case (assuming we ever add a resource with this semantic), the reasoning is the reverse (not more than 100%, 0% perpetually evicts). --- .../apis/kubeletconfig/v1alpha1/types.go | 4 +-- pkg/kubelet/eviction/helpers.go | 33 ++++++++++++------- pkg/kubelet/eviction/helpers_test.go | 14 ++++++++ test/e2e_node/eviction_test.go | 6 ++-- 4 files changed, 41 insertions(+), 16 deletions(-) diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go index 4b50e51b1f0..596b5f62d8a 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go @@ -212,10 +212,10 @@ type KubeletConfiguration struct { SerializeImagePulls *bool `json:"serializeImagePulls"` // Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. // +optional - EvictionHard map[string]string `json:"evictionHard"` + EvictionHard map[string]string `json:"evictionHard,omitempty"` // Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}. // +optional - EvictionSoft map[string]string `json:"evictionSoft"` + EvictionSoft map[string]string `json:"evictionSoft,omitempty"` // Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}. 
// +optional EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod"` diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index 0f51de97edc..87c4da77498 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -107,7 +107,6 @@ func ParseThresholdConfig(allocatableConfig []string, evictionHard, evictionSoft return nil, err } results = append(results, hardThresholds...) - softThresholds, err := parseThresholdStatements(evictionSoft) if err != nil { return nil, err @@ -151,26 +150,36 @@ func parseThresholdStatements(statements map[string]string) ([]evictionapi.Thres if err != nil { return nil, err } - results = append(results, result) + if result != nil { + results = append(results, *result) + } } return results, nil } -// parseThresholdStatement parses a threshold statement. -func parseThresholdStatement(signal evictionapi.Signal, val string) (evictionapi.Threshold, error) { +// parseThresholdStatement parses a threshold statement and returns a threshold, +// or nil if the threshold should be ignored. +func parseThresholdStatement(signal evictionapi.Signal, val string) (*evictionapi.Threshold, error) { if !validSignal(signal) { - return evictionapi.Threshold{}, fmt.Errorf(unsupportedEvictionSignal, signal) + return nil, fmt.Errorf(unsupportedEvictionSignal, signal) } operator := evictionapi.OpForSignal[signal] if strings.HasSuffix(val, "%") { + // ignore 0% and 100% + if val == "0%" || val == "100%" { + return nil, nil + } percentage, err := parsePercentage(val) if err != nil { - return evictionapi.Threshold{}, err + return nil, err } - if percentage <= 0 { - return evictionapi.Threshold{}, fmt.Errorf("eviction percentage threshold %v must be positive: %s", signal, val) + if percentage < 0 { + return nil, fmt.Errorf("eviction percentage threshold %v must be >= 0%%: %s", signal, val) } - return evictionapi.Threshold{ + if percentage > 100 { + return nil, fmt.Errorf("eviction percentage threshold %v must be <= 100%%: %s", signal, val) + } + return &evictionapi.Threshold{ Signal: signal, Operator: operator, Value: evictionapi.ThresholdValue{ @@ -180,12 +189,12 @@ func parseThresholdStatement(signal evictionapi.Signal, val string) (evictionapi } quantity, err := resource.ParseQuantity(val) if err != nil { - return evictionapi.Threshold{}, err + return nil, err } if quantity.Sign() < 0 || quantity.IsZero() { - return evictionapi.Threshold{}, fmt.Errorf("eviction threshold %v must be positive: %s", signal, &quantity) + return nil, fmt.Errorf("eviction threshold %v must be positive: %s", signal, &quantity) } - return evictionapi.Threshold{ + return &evictionapi.Threshold{ Signal: signal, Operator: operator, Value: evictionapi.ThresholdValue{ diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index ca78f4583f2..168352ff255 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -288,6 +288,20 @@ func TestParseThresholdConfig(t *testing.T) { }, }, }, + "disable via 0%": { + allocatableConfig: []string{}, + evictionHard: map[string]string{"memory.available": "0%"}, + evictionSoft: map[string]string{"memory.available": "0%"}, + expectErr: false, + expectThresholds: []evictionapi.Threshold{}, + }, + "disable via 100%": { + allocatableConfig: []string{}, + evictionHard: map[string]string{"memory.available": "100%"}, + evictionSoft: map[string]string{"memory.available": "100%"}, + expectErr: false, + expectThresholds: []evictionapi.Threshold{}, + }, 
"invalid-signal": { allocatableConfig: []string{}, evictionHard: map[string]string{"mem.available": "150Mi"}, diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 1f24b27d9a8..c2d9c9a67a1 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -170,7 +170,8 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup initialConfig.EvictionMaxPodGracePeriod = 30 initialConfig.EvictionMinimumReclaim = map[string]string{} // Ensure that pods are not evicted because of the eviction-hard threshold - initialConfig.EvictionHard = map[string]string{} + // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty) + initialConfig.EvictionHard = map[string]string{"memory.available": "0%"} }) runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{ { @@ -192,7 +193,8 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true - initialConfig.EvictionHard = map[string]string{} + // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty) + initialConfig.EvictionHard = map[string]string{"memory.available": "0%"} }) sizeLimit := resource.MustParse("100Mi") useOverLimit := 101 /* Mb */ From 9450c8a628fc9e8d3e79081c53237810508d8c96 Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Mon, 12 Feb 2018 14:36:31 -0800 Subject: [PATCH 35/53] Bump GLBC to 0.9.8-alpha.2 and change back to --verbose --- cluster/gce/manifests/glbc.manifest | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/gce/manifests/glbc.manifest b/cluster/gce/manifests/glbc.manifest index 3562620d506..2d515751d8a 100644 --- a/cluster/gce/manifests/glbc.manifest +++ b/cluster/gce/manifests/glbc.manifest @@ -1,19 +1,19 @@ apiVersion: v1 kind: Pod metadata: - name: l7-lb-controller-v0.9.8-alpha.1 + name: l7-lb-controller-v0.9.8-alpha.2 namespace: kube-system annotations: scheduler.alpha.kubernetes.io/critical-pod: '' labels: k8s-app: gcp-lb-controller - version: v0.9.8-alpha.1 + version: v0.9.8-alpha.2 kubernetes.io/name: "GLBC" spec: terminationGracePeriodSeconds: 600 hostNetwork: true containers: - - image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.1 + - image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2 livenessProbe: httpGet: path: /healthz @@ -44,7 +44,7 @@ spec: # TODO: split this out into args when we no longer need to pipe stdout to a file #6428 - sh - -c - - 'exec /glbc -v=3 --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' + - 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' volumes: - hostPath: path: /etc/gce.conf From 52593616c48472b9ec0071d680f47b52406bfa26 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Mon, 12 Feb 2018 14:01:11 -0800 Subject: [PATCH 36/53] bazel: update digest for debian-iptables-amd64 
---
 build/root/WORKSPACE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE
index 88f35e1eaf7..47f347e25cf 100644
--- a/build/root/WORKSPACE
+++ b/build/root/WORKSPACE
@@ -59,7 +59,7 @@ http_file(

 docker_pull(
     name = "debian-iptables-amd64",
-    digest = "sha256:a3b936c0fb98a934eecd2cfb91f73658d402b29116084e778ce9ddb68e55383e",
+    digest = "sha256:fb18678f8203ca1bd2fad2671e3ebd80cb408a1baae423d4ad39c05f4caac4e1",
     registry = "k8s.gcr.io",
     repository = "debian-iptables-amd64",
     tag = "v10",  # ignored, but kept here for documentation

From 746e247e8750acb5f635132c74e0e28f2ac9dc73 Mon Sep 17 00:00:00 2001
From: Joe Betz
Date: Thu, 1 Feb 2018 16:55:58 -0800
Subject: [PATCH 37/53] Add etcd 3.x minor version rollback support to migrate-if-needed.sh

---
 cluster/gce/manifests/etcd.manifest | 3 +
 cluster/images/etcd/Dockerfile | 2 +-
 cluster/images/etcd/Makefile | 134 +++++++++++++++++-
 cluster/images/etcd/README.md | 8 ++
 cluster/images/etcd/migrate-if-needed.sh | 173 ++++++++++++-----------
 cluster/images/etcd/start-stop-etcd.sh | 68 +++++++++
 6 files changed, 302 insertions(+), 86 deletions(-)
 create mode 100755 cluster/images/etcd/start-stop-etcd.sh

diff --git a/cluster/gce/manifests/etcd.manifest b/cluster/gce/manifests/etcd.manifest
index 8fbc0afcc11..0983145ca19 100644
--- a/cluster/gce/manifests/etcd.manifest
+++ b/cluster/gce/manifests/etcd.manifest
@@ -33,6 +33,9 @@
             },
             { "name": "DATA_DIRECTORY",
               "value": "/var/etcd/data{{ suffix }}"
+            },
+            { "name": "INITIAL_CLUSTER",
+              "value": "{{ etcd_cluster }}"
             }
           ],
         "livenessProbe": {
diff --git a/cluster/images/etcd/Dockerfile b/cluster/images/etcd/Dockerfile
index 1d75d1787f4..5e7eaf9f51b 100644
--- a/cluster/images/etcd/Dockerfile
+++ b/cluster/images/etcd/Dockerfile
@@ -16,4 +16,4 @@ FROM BASEIMAGE
 EXPOSE 2379 2380 4001 7001
 COPY etcd* etcdctl* /usr/local/bin/
-COPY migrate-if-needed.sh attachlease rollback /usr/local/bin/
+COPY migrate-if-needed.sh start-stop-etcd.sh attachlease rollback /usr/local/bin/
diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile
index 401811cad10..f0477d4555e 100644
--- a/cluster/images/etcd/Makefile
+++ b/cluster/images/etcd/Makefile
@@ -28,6 +28,8 @@
 # That binary will be set to the last tag from $(TAGS).
 TAGS?=2.2.1 2.3.7 3.0.17 3.1.11 3.2.14
 REGISTRY_TAG?=3.2.14
+# ROLLBACK_REGISTRY_TAG specifies the tag that REGISTRY_TAG may be rolled back to.
+ROLLBACK_REGISTRY_TAG?=3.1.11
 ARCH?=amd64
 REGISTRY?=k8s.gcr.io
 # golang version should match the golang version from https://github.com/coreos/etcd/releases for REGISTRY_TAG version of etcd.
@@ -57,10 +59,10 @@ build: find ./ -maxdepth 1 -type f | xargs -I {} cp {} $(TEMP_DIR) # Compile attachlease - docker run -i -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ + docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ /bin/bash -c "CGO_ENABLED=0 go build -o /build/attachlease k8s.io/kubernetes/cluster/images/etcd/attachlease" # Compile rollback - docker run -i -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ + docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ /bin/bash -c "CGO_ENABLED=0 go build -o /build/rollback k8s.io/kubernetes/cluster/images/etcd/rollback" @@ -81,7 +83,7 @@ else # For each release create a tmp dir 'etcd_release_tmp_dir' and unpack the release tar there. for tag in $(TAGS); do \ etcd_release_tmp_dir=$(shell mktemp -d); \ - docker run -i -v $$etcd_release_tmp_dir:/etcdbin golang:$(GOLANG_VERSION) /bin/bash -c \ + docker run --interactive -v $$etcd_release_tmp_dir:/etcdbin golang:$(GOLANG_VERSION) /bin/bash -c \ "git clone https://github.com/coreos/etcd /go/src/github.com/coreos/etcd \ && cd /go/src/github.com/coreos/etcd \ && git checkout v$$tag \ @@ -114,5 +116,127 @@ ifeq ($(ARCH),amd64) gcloud docker -- push $(REGISTRY)/etcd:$(REGISTRY_TAG) endif -all: build -.PHONY: build push +ETCD2_ROLLBACK_NEW_TAG=3.0.17 +ETCD2_ROLLBACK_OLD_TAG=2.2.1 + +# Test a rollback to etcd2 from the earliest etcd3 version. +test-rollback-etcd2: + mkdir -p $(TEMP_DIR)/rollback-etcd2 + cd $(TEMP_DIR)/rollback-etcd2 + + @echo "Starting $(ETCD2_ROLLBACK_NEW_TAG) etcd and writing some sample data." + docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \ + -e "TARGET_STORAGE=etcd3" \ + -e "TARGET_VERSION=$(ETCD2_ROLLBACK_NEW_TAG)" \ + -e "DATA_DIRECTORY=/var/etcd/data" \ + gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \ + 'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \ + /usr/local/bin/migrate-if-needed.sh && \ + source /usr/local/bin/start-stop-etcd.sh && \ + START_STORAGE=etcd3 START_VERSION=$(ETCD2_ROLLBACK_NEW_TAG) start_etcd && \ + ETCDCTL_API=3 /usr/local/bin/etcdctl-$(ETCD2_ROLLBACK_NEW_TAG) --endpoints http://127.0.0.1:$${ETCD_PORT} put /registry/k1 value1 && \ + stop_etcd && \ + [ $$(cat /var/etcd/data/version.txt) = $(ETCD2_ROLLBACK_NEW_TAG)/etcd3 ]' + + @echo "Rolling back to the previous version of etcd and recording keyspace to a flat file." 
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \
+	  -e "TARGET_STORAGE=etcd2" \
+	  -e "TARGET_VERSION=$(ETCD2_ROLLBACK_OLD_TAG)" \
+	  -e "DATA_DIRECTORY=/var/etcd/data" \
+	  gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	  'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+	   /usr/local/bin/migrate-if-needed.sh && \
+	   source /usr/local/bin/start-stop-etcd.sh && \
+	   START_STORAGE=etcd2 START_VERSION=$(ETCD2_ROLLBACK_OLD_TAG) start_etcd && \
+	   /usr/local/bin/etcdctl-$(ETCD2_ROLLBACK_OLD_TAG) --endpoint 127.0.0.1:$${ETCD_PORT} get /registry/k1 > /var/etcd/keyspace.txt && \
+	   stop_etcd'
+
+	@echo "Checking if rollback successfully downgraded etcd to $(ETCD2_ROLLBACK_OLD_TAG)"
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \
+	  gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	  '[ $$(cat /var/etcd/data/version.txt) = $(ETCD2_ROLLBACK_OLD_TAG)/etcd2 ] && \
+	   grep -q value1 /var/etcd/keyspace.txt'
+
+# Test a rollback from the latest version to the previous version.
+test-rollback:
+	mkdir -p $(TEMP_DIR)/rollback-test
+	cd $(TEMP_DIR)/rollback-test
+
+	@echo "Starting $(REGISTRY_TAG) etcd and writing some sample data."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+	  -e "TARGET_STORAGE=etcd3" \
+	  -e "TARGET_VERSION=$(REGISTRY_TAG)" \
+	  -e "DATA_DIRECTORY=/var/etcd/data" \
+	  gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	  'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+	   /usr/local/bin/migrate-if-needed.sh && \
+	   source /usr/local/bin/start-stop-etcd.sh && \
+	   START_STORAGE=etcd3 START_VERSION=$(REGISTRY_TAG) start_etcd && \
+	   ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} put /registry/k1 value1 && \
+	   stop_etcd'
+
+	@echo "Rolling back to the previous version of etcd and recording keyspace to a flat file."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+	  -e "TARGET_STORAGE=etcd3" \
+	  -e "TARGET_VERSION=$(ROLLBACK_REGISTRY_TAG)" \
+	  -e "DATA_DIRECTORY=/var/etcd/data" \
+	  gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	  'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+	   /usr/local/bin/migrate-if-needed.sh && \
+	   source /usr/local/bin/start-stop-etcd.sh && \
+	   START_STORAGE=etcd3 START_VERSION=$(ROLLBACK_REGISTRY_TAG) start_etcd && \
+	   ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} get --prefix / > /var/etcd/keyspace.txt && \
+	   stop_etcd'
+
+	@echo "Checking if rollback successfully downgraded etcd to $(ROLLBACK_REGISTRY_TAG)"
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+	  gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	  '[ $$(cat /var/etcd/data/version.txt) = $(ROLLBACK_REGISTRY_TAG)/etcd3 ] && \
+	   grep -q value1 /var/etcd/keyspace.txt'
+
+# Test migrating from each supported version to the latest version.
+test-migrate:
+	for tag in $(TAGS); do \
+	  echo "Testing migration from $${tag} to $(REGISTRY_TAG)" && \
+	  mkdir -p $(TEMP_DIR)/migrate-$${tag} && \
+	  cd $(TEMP_DIR)/migrate-$${tag} && \
+	  MAJOR_VERSION=$$(echo $${tag} | cut -c 1) && \
+	  echo "Starting etcd $${tag} and writing sample data to keyspace" && \
+	  docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+	    -e "TARGET_STORAGE=etcd$${MAJOR_VERSION}" \
+	    -e "TARGET_VERSION=$${tag}" \
+	    -e "DATA_DIRECTORY=/var/etcd/data" \
+	    gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	    "INITIAL_CLUSTER=etcd-\$$(hostname)=http://localhost:2380 \
+	     /usr/local/bin/migrate-if-needed.sh && \
+	     source /usr/local/bin/start-stop-etcd.sh && \
+	     START_STORAGE=etcd$${MAJOR_VERSION} START_VERSION=$${tag} start_etcd && \
+	     if [ $${MAJOR_VERSION} == 2 ]; then \
+	       /usr/local/bin/etcdctl --endpoint http://127.0.0.1:\$${ETCD_PORT} set /registry/k1 value1; \
+	     else \
+	       ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:\$${ETCD_PORT} put /registry/k1 value1; \
+	     fi && \
+	     stop_etcd" && \
+	  echo " Migrating from $${tag} to $(REGISTRY_TAG) and capturing keyspace" && \
+	  docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+	    -e "TARGET_STORAGE=etcd3" \
+	    -e "TARGET_VERSION=$(REGISTRY_TAG)" \
+	    -e "DATA_DIRECTORY=/var/etcd/data" \
+	    gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	    'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+	     /usr/local/bin/migrate-if-needed.sh && \
+	     source /usr/local/bin/start-stop-etcd.sh && \
+	     START_STORAGE=etcd3 START_VERSION=$(REGISTRY_TAG) start_etcd && \
+	     ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} get --prefix / > /var/etcd/keyspace.txt && \
+	     stop_etcd' && \
+	  echo "Checking if migrating from $${tag} successfully upgraded etcd to $(REGISTRY_TAG)" && \
+	  docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+	    gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+	    '[ $$(cat /var/etcd/data/version.txt) = $(REGISTRY_TAG)/etcd3 ] && \
+	     grep -q value1 /var/etcd/keyspace.txt'; \
+	done
+
+test: test-rollback test-rollback-etcd2 test-migrate
+
+all: build test
+.PHONY: build push test-rollback test-rollback-etcd2 test-migrate test
diff --git a/cluster/images/etcd/README.md b/cluster/images/etcd/README.md
index df5cfe5b3d7..8e9c9cb5de4 100644
--- a/cluster/images/etcd/README.md
+++ b/cluster/images/etcd/README.md
@@ -7,6 +7,14 @@ For other architectures, `etcd` is cross-compiled from source. Arch-specific `bu

 #### How to release

+First, run the migration and rollback tests.
+
+```console
+$ make build test
+```
+
+Next, build and push the docker images for all supported architectures.
+
 ```console
 # Build for linux/amd64 (default)
 $ make push ARCH=amd64
diff --git a/cluster/images/etcd/migrate-if-needed.sh b/cluster/images/etcd/migrate-if-needed.sh
index 9e65f926d8c..5ef718fae42 100755
--- a/cluster/images/etcd/migrate-if-needed.sh
+++ b/cluster/images/etcd/migrate-if-needed.sh
@@ -18,7 +18,7 @@
 # This script performs etcd upgrade based on the following environmental
 # variables:
 # TARGET_STORAGE - API of etcd to be used (supported: 'etcd2', 'etcd3')
-# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17')
+# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.11', '3.2.14')
 # DATA_DIRECTORY - directory with etcd data
 #
 # The current etcd version and storage format are detected based on the
@@ -28,7 +28,8 @@
 # The update workflow supports the following upgrade steps:
 # - 2.2.1/etcd2 -> 2.3.7/etcd2
 # - 2.3.7/etcd2 -> 3.0.17/etcd2
-# - 3.0.17/etcd2 -> 3.0.17/etcd3
+# - 3.0.17/etcd3 -> 3.1.11/etcd3
+# - 3.1.11/etcd3 -> 3.2.14/etcd3
 #
 # NOTE: The releases supported in this script have to match release binaries
 # present in the etcd image (to make this script work correctly).
@@ -39,6 +40,72 @@
 set -o errexit
 set -o nounset

+source $(dirname "$0")/start-stop-etcd.sh
+
+# Rollback to previous minor version of etcd 3.x, if needed.
+#
+# Warning: For HA etcd clusters (any cluster with more than one member), all members must be stopped before rolling back; zero-
+# downtime rollbacks are not supported.
+rollback_etcd3_minor_version() {
+  if [ ${TARGET_MINOR_VERSION} != $((${CURRENT_MINOR_VERSION}-1)) ]; then
+    echo "Rollback from ${CURRENT_VERSION} to ${TARGET_VERSION} not supported, only rollbacks to the previous minor version are supported."
+    exit 1
+  fi
+  echo "Performing etcd ${CURRENT_VERSION} -> ${TARGET_VERSION} rollback"
+  ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak"
+  rm -rf "${ROLLBACK_BACKUP_DIR}"
+  SNAPSHOT_FILE="${DATA_DIRECTORY}.snapshot.db"
+  rm -rf "${SNAPSHOT_FILE}"
+  ETCD_CMD="/usr/local/bin/etcd-${CURRENT_VERSION}"
+  ETCDCTL_CMD="/usr/local/bin/etcdctl-${CURRENT_VERSION}"
+
+  # Start CURRENT_VERSION of etcd.
+  START_VERSION="${CURRENT_VERSION}"
+  START_STORAGE="${CURRENT_STORAGE}"
+  echo "Starting etcd version ${START_VERSION} to capture rollback snapshot."
+  if ! start_etcd; then
+    echo "Unable to automatically downgrade etcd: starting etcd version ${START_VERSION} to capture rollback snapshot failed."
+    echo "See https://coreos.com/etcd/docs/3.2.13/op-guide/recovery.html for manual downgrade options."
+    exit 1
+  else
+    ETCDCTL_API=3 ${ETCDCTL_CMD} snapshot --endpoints "http://127.0.0.1:${ETCD_PORT}" save "${SNAPSHOT_FILE}"
+  fi
+  stop_etcd
+
+  # Backup the data before rolling back.
+  mv "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}"
+  ETCDCTL_CMD="/usr/local/bin/etcdctl-${TARGET_VERSION}"
+  NAME="etcd-$(hostname)"
+  ETCDCTL_API=3 ${ETCDCTL_CMD} snapshot restore "${SNAPSHOT_FILE}" \
+      --data-dir "${DATA_DIRECTORY}" --name "${NAME}" --initial-cluster "${INITIAL_CLUSTER}"
+
+  CURRENT_VERSION="${TARGET_VERSION}"
+  echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}"
+}
+
+# Rollback from "3.0.x" version in 'etcd3' mode to "2.2.1" version in 'etcd2' mode, if needed.
+rollback_to_etcd2() {
+  if [ "$(echo ${CURRENT_VERSION} | cut -c1-4)" != "3.0."
-o "${TARGET_VERSION}" != "2.2.1" ]; then + echo "etcd3 -> etcd2 downgrade is supported only between 3.0.x and 2.2.1" + return 0 + fi + echo "Backup and remove all existing v2 data" + ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak" + rm -rf "${ROLLBACK_BACKUP_DIR}" + mkdir -p "${ROLLBACK_BACKUP_DIR}" + cp -r "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}" + echo "Performing etcd3 -> etcd2 rollback" + ${ROLLBACK} --data-dir "${DATA_DIRECTORY}" + if [ "$?" -ne "0" ]; then + echo "Rollback to etcd2 failed" + exit 1 + fi + CURRENT_STORAGE="etcd2" + CURRENT_VERSION="2.2.1" + echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}" +} + + if [ -z "${TARGET_STORAGE:-}" ]; then echo "TARGET_STORAGE variable unset - unexpected failure" exit 1 @@ -51,6 +118,10 @@ if [ -z "${DATA_DIRECTORY:-}" ]; then echo "DATA_DIRECTORY variable unset - unexpected failure" exit 1 fi +if [ -z "${INITIAL_CLUSTER:-}" ]; then + echo "Warn: INITIAL_CLUSTER variable unset - defaulting to etcd-$(hostname)=http://localhost:2380" + INITIAL_CLUSTER="etcd-$(hostname)=http://localhost:2380" +fi echo "$(date +'%Y-%m-%d %H:%M:%S') Detecting if migration is needed" @@ -68,7 +139,7 @@ fi # NOTE: SUPPORTED_VERSION has to match release binaries present in the # etcd image (to make this script work correctly). # We cannot use array since sh doesn't support it. -SUPPORTED_VERSIONS_STRING="2.2.1 2.3.7 3.0.17" +SUPPORTED_VERSIONS_STRING="2.2.1 2.3.7 3.0.17 3.1.11 3.2.14" SUPPORTED_VERSIONS=$(echo "${SUPPORTED_VERSIONS_STRING}" | tr " " "\n") VERSION_FILE="version.txt" @@ -96,58 +167,6 @@ if [ -z "$(ls -A ${DATA_DIRECTORY})" ]; then exit 0 fi -# Starts 'etcd' version ${START_VERSION} and writes to it: -# 'etcd_version' -> "${START_VERSION}" -# Successful write confirms that etcd is up and running. -# Sets ETCD_PID at the end. -# Returns 0 if etcd was successfully started, non-0 otherwise. -start_etcd() { - # Use random ports, so that apiserver cannot connect to etcd. - ETCD_PORT=18629 - ETCD_PEER_PORT=2380 - # Avoid collisions between etcd and event-etcd. - case "${DATA_DIRECTORY}" in - *event*) - ETCD_PORT=18631 - ETCD_PEER_PORT=2381 - ;; - esac - local ETCD_CMD="${ETCD:-/usr/local/bin/etcd-${START_VERSION}}" - local ETCDCTL_CMD="${ETCDCTL:-/usr/local/bin/etcdctl-${START_VERSION}}" - local API_VERSION="$(echo ${START_STORAGE} | cut -c5-5)" - if [ "${API_VERSION}" = "2" ]; then - ETCDCTL_CMD="${ETCDCTL_CMD} --debug --endpoint=http://127.0.0.1:${ETCD_PORT} set" - else - ETCDCTL_CMD="${ETCDCTL_CMD} --endpoints=http://127.0.0.1:${ETCD_PORT} put" - fi - ${ETCD_CMD} \ - --name="etcd-$(hostname)" \ - --debug \ - --data-dir=${DATA_DIRECTORY} \ - --listen-client-urls http://127.0.0.1:${ETCD_PORT} \ - --advertise-client-urls http://127.0.0.1:${ETCD_PORT} \ - --listen-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} \ - --initial-advertise-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} & - ETCD_PID=$! - # Wait until we can write to etcd. - for i in $(seq 240); do - sleep 0.5 - ETCDCTL_API="${API_VERSION}" ${ETCDCTL_CMD} 'etcd_version' ${START_VERSION} - if [ "$?" -eq "0" ]; then - echo "Etcd on port ${ETCD_PORT} is up." - return 0 - fi - done - echo "Timeout while waiting for etcd on port ${ETCD_PORT}" - return 1 -} - -# Stops etcd with ${ETCD_PID} pid. 
-stop_etcd() {
-  kill "${ETCD_PID-}" >/dev/null 2>&1 || :
-  wait "${ETCD_PID-}" >/dev/null 2>&1 || :
-}
-
 ATTACHLEASE="${ATTACHLEASE:-/usr/local/bin/attachlease}"
 ROLLBACK="${ROLLBACK:-/usr/local/bin/rollback}"

@@ -163,6 +182,24 @@ if [ "${CURRENT_VERSION}" = "2.2.1" -a "${CURRENT_VERSION}" != "${TARGET_VERSION
   echo "Backup done in ${BACKUP_DIR}"
 fi

+CURRENT_MINOR_VERSION="$(echo ${CURRENT_VERSION} | awk -F'.' '{print $2}')"
+TARGET_MINOR_VERSION="$(echo ${TARGET_VERSION} | awk -F'.' '{print $2}')"
+
+# "rollback-if-needed"
+case "${CURRENT_STORAGE}-${TARGET_STORAGE}" in
+  "etcd3-etcd3")
+    [ ${TARGET_MINOR_VERSION} -lt ${CURRENT_MINOR_VERSION} ] && rollback_etcd3_minor_version
+    break
+    ;;
+  "etcd3-etcd2")
+    rollback_to_etcd2
+    break
+    ;;
+  *)
+    break
+    ;;
+esac
+
 # Do the roll-forward migration if needed.
 # The migration goes as follows:
 # 1. for all versions starting one after the current version of etcd
@@ -227,7 +264,7 @@ for step in ${SUPPORTED_VERSIONS}; do
       echo "Starting etcd ${step} in v3 mode failed"
       exit 1
     fi
-    ${ETCDCTL_CMD} rm --recursive "${ETCD_DATA_PREFIX}"
+    ${ETCDCTL_CMD} --endpoints "http://127.0.0.1:${ETCD_PORT}" rm --recursive "${ETCD_DATA_PREFIX}"
     # Kill etcd and wait until this is down.
     stop_etcd
    echo "Successfully removed v2 data"
@@ -239,28 +276,4 @@ for step in ${SUPPORTED_VERSIONS}; do
   fi
 done

-# Do the rollback of needed.
-# NOTE: Rollback is only supported from "3.0.x" version in 'etcd3' mode to
-# "2.2.1" version in 'etcd2' mode.
-if [ "${CURRENT_STORAGE}" = "etcd3" -a "${TARGET_STORAGE}" = "etcd2" ]; then
-  if [ "$(echo ${CURRENT_VERSION} | cut -c1-4)" != "3.0." -o "${TARGET_VERSION}" != "2.2.1" ]; then
-    echo "etcd3 -> etcd2 downgrade is supported only between 3.0.x and 2.2.1"
-    return 0
-  fi
-  echo "Backup and remove all existing v2 data"
-  ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak"
-  rm -rf "${ROLLBACK_BACKUP_DIR}"
-  mkdir -p "${ROLLBACK_BACKUP_DIR}"
-  cp -r "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}"
-  echo "Performing etcd3 -> etcd2 rollback"
-  ${ROLLBACK} --data-dir "${DATA_DIRECTORY}"
-  if [ "$?" -ne "0" ]; then
-    echo "Rollback to etcd2 failed"
-    exit 1
-  fi
-  CURRENT_STORAGE="etcd2"
-  CURRENT_VERSION="2.2.1"
-  echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}"
-fi
-
 echo "$(date +'%Y-%m-%d %H:%M:%S') Migration finished"
diff --git a/cluster/images/etcd/start-stop-etcd.sh b/cluster/images/etcd/start-stop-etcd.sh
new file mode 100755
index 00000000000..5a8ba8b7f18
--- /dev/null
+++ b/cluster/images/etcd/start-stop-etcd.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Starts 'etcd' version ${START_VERSION} and writes to it:
+# 'etcd_version' -> "${START_VERSION}"
+# Successful write confirms that etcd is up and running.
+# Sets ETCD_PID at the end.
+# Returns 0 if etcd was successfully started, non-0 otherwise.
+start_etcd() {
+  # Use random ports, so that apiserver cannot connect to etcd.
+ ETCD_PORT=18629 + ETCD_PEER_PORT=2380 + # Avoid collisions between etcd and event-etcd. + case "${DATA_DIRECTORY}" in + *event*) + ETCD_PORT=18631 + ETCD_PEER_PORT=2381 + ;; + esac + local ETCD_CMD="${ETCD:-/usr/local/bin/etcd-${START_VERSION}}" + local ETCDCTL_CMD="${ETCDCTL:-/usr/local/bin/etcdctl-${START_VERSION}}" + local API_VERSION="$(echo ${START_STORAGE} | cut -c5-5)" + if [ "${API_VERSION}" = "2" ]; then + ETCDCTL_CMD="${ETCDCTL_CMD} --debug --endpoint=http://127.0.0.1:${ETCD_PORT} set" + else + ETCDCTL_CMD="${ETCDCTL_CMD} --endpoints=http://127.0.0.1:${ETCD_PORT} put" + fi + ${ETCD_CMD} \ + --name="etcd-$(hostname)" \ + --initial-cluster="etcd-$(hostname)=http://127.0.0.1:${ETCD_PEER_PORT}" \ + --debug \ + --data-dir=${DATA_DIRECTORY} \ + --listen-client-urls http://127.0.0.1:${ETCD_PORT} \ + --advertise-client-urls http://127.0.0.1:${ETCD_PORT} \ + --listen-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} \ + --initial-advertise-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} & + ETCD_PID=$! + # Wait until we can write to etcd. + for i in $(seq 240); do + sleep 0.5 + ETCDCTL_API="${API_VERSION}" ${ETCDCTL_CMD} 'etcd_version' ${START_VERSION} + if [ "$?" -eq "0" ]; then + echo "Etcd on port ${ETCD_PORT} is up." + return 0 + fi + done + echo "Timeout while waiting for etcd on port ${ETCD_PORT}" + return 1 +} + +# Stops etcd with ${ETCD_PID} pid. +stop_etcd() { + kill "${ETCD_PID-}" >/dev/null 2>&1 || : + wait "${ETCD_PID-}" >/dev/null 2>&1 || : +} From 01f16da594d668de56e259dc24c1e50acad6f66d Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Mon, 12 Feb 2018 15:19:37 -0800 Subject: [PATCH 38/53] bazel: update busybox digest to latest (~1.28.0) --- build/root/WORKSPACE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 47f347e25cf..4e9b42caf13 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -75,7 +75,7 @@ docker_pull( docker_pull( name = "official_busybox", - digest = "sha256:be3c11fdba7cfe299214e46edc642e09514dbb9bbefcd0d3836c05a1e0cd0642", + digest = "sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d", registry = "index.docker.io", repository = "library/busybox", tag = "latest", # ignored, but kept here for documentation From da77826d085814f753af8dc4a24b24f41aded74a Mon Sep 17 00:00:00 2001 From: "Timothy St. Clair" Date: Mon, 12 Feb 2018 18:56:41 -0600 Subject: [PATCH 39/53] Remove myself (timothysc) from OWNERS files on areas that I do not actively maintain. 
--- pkg/api/v1/OWNERS | 1 - pkg/apis/OWNERS | 1 - pkg/apis/apps/OWNERS | 1 - pkg/apis/authentication/OWNERS | 1 - pkg/apis/authorization/OWNERS | 1 - pkg/apis/autoscaling/OWNERS | 1 - pkg/apis/batch/OWNERS | 1 - pkg/apis/certificates/OWNERS | 1 - pkg/apis/componentconfig/OWNERS | 1 - pkg/apis/core/OWNERS | 1 - pkg/apis/core/v1/OWNERS | 1 - pkg/apis/extensions/OWNERS | 1 - pkg/apis/rbac/OWNERS | 1 - staging/src/k8s.io/api/OWNERS | 1 - staging/src/k8s.io/api/apps/OWNERS | 1 - staging/src/k8s.io/api/authentication/OWNERS | 1 - staging/src/k8s.io/api/authorization/OWNERS | 1 - staging/src/k8s.io/api/autoscaling/OWNERS | 1 - staging/src/k8s.io/api/batch/OWNERS | 1 - staging/src/k8s.io/api/certificates/OWNERS | 1 - staging/src/k8s.io/api/extensions/OWNERS | 1 - staging/src/k8s.io/api/rbac/OWNERS | 1 - staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 - staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS | 1 - staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS | 1 - staging/src/k8s.io/apiserver/OWNERS | 1 - staging/src/k8s.io/apiserver/pkg/endpoints/testing/OWNERS | 1 - staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS | 1 - staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS | 1 - 29 files changed, 29 deletions(-) diff --git a/pkg/api/v1/OWNERS b/pkg/api/v1/OWNERS index c627c33f878..ba0d083cedc 100755 --- a/pkg/api/v1/OWNERS +++ b/pkg/api/v1/OWNERS @@ -27,7 +27,6 @@ reviewers: - ncdc - tallclair - eparis -- timothysc - piosz - jsafrane - dims diff --git a/pkg/apis/OWNERS b/pkg/apis/OWNERS index c8cd82a728e..180d60f9ef2 100644 --- a/pkg/apis/OWNERS +++ b/pkg/apis/OWNERS @@ -33,7 +33,6 @@ reviewers: - yifan-gu - eparis - mwielgus -- timothysc - feiskyer - soltysh - piosz diff --git a/pkg/apis/apps/OWNERS b/pkg/apis/apps/OWNERS index e06ff4c481e..2f260571a70 100755 --- a/pkg/apis/apps/OWNERS +++ b/pkg/apis/apps/OWNERS @@ -9,7 +9,6 @@ reviewers: - saad-ali - ncdc - tallclair -- timothysc - dims - errordeveloper - mml diff --git a/pkg/apis/authentication/OWNERS b/pkg/apis/authentication/OWNERS index 95235234107..2bdfd0ce5bc 100755 --- a/pkg/apis/authentication/OWNERS +++ b/pkg/apis/authentication/OWNERS @@ -4,7 +4,6 @@ reviewers: - wojtek-t - deads2k - sttts -- timothysc - mbohlool - jianhuiz - enj diff --git a/pkg/apis/authorization/OWNERS b/pkg/apis/authorization/OWNERS index a68d7eef577..c1613fc2e02 100755 --- a/pkg/apis/authorization/OWNERS +++ b/pkg/apis/authorization/OWNERS @@ -9,7 +9,6 @@ reviewers: - erictune - sttts - ncdc -- timothysc - dims - mml - mbohlool diff --git a/pkg/apis/autoscaling/OWNERS b/pkg/apis/autoscaling/OWNERS index 76dffecebdb..4a495ec2532 100755 --- a/pkg/apis/autoscaling/OWNERS +++ b/pkg/apis/autoscaling/OWNERS @@ -8,7 +8,6 @@ reviewers: - erictune - sttts - ncdc -- timothysc - piosz - dims - errordeveloper diff --git a/pkg/apis/batch/OWNERS b/pkg/apis/batch/OWNERS index 502f9077113..38935ff169a 100755 --- a/pkg/apis/batch/OWNERS +++ b/pkg/apis/batch/OWNERS @@ -9,7 +9,6 @@ reviewers: - sttts - saad-ali - ncdc -- timothysc - soltysh - dims - errordeveloper diff --git a/pkg/apis/certificates/OWNERS b/pkg/apis/certificates/OWNERS index 6066d2c1218..1d1ab36e756 100755 --- a/pkg/apis/certificates/OWNERS +++ b/pkg/apis/certificates/OWNERS @@ -6,7 +6,6 @@ reviewers: - caesarxuchao - liggitt - sttts -- timothysc - dims - errordeveloper - mbohlool diff --git a/pkg/apis/componentconfig/OWNERS b/pkg/apis/componentconfig/OWNERS index 16e6aa37c33..a1f5f1d9a40 100755 --- a/pkg/apis/componentconfig/OWNERS +++ b/pkg/apis/componentconfig/OWNERS @@ -21,7 +21,6 
@@ reviewers: - ncdc - yifan-gu - mwielgus -- timothysc - feiskyer - dims - errordeveloper diff --git a/pkg/apis/core/OWNERS b/pkg/apis/core/OWNERS index 0605b27b2aa..17bebcb369c 100644 --- a/pkg/apis/core/OWNERS +++ b/pkg/apis/core/OWNERS @@ -37,7 +37,6 @@ reviewers: - yifan-gu - eparis - mwielgus -- timothysc - soltysh - piosz - jsafrane diff --git a/pkg/apis/core/v1/OWNERS b/pkg/apis/core/v1/OWNERS index c627c33f878..ba0d083cedc 100755 --- a/pkg/apis/core/v1/OWNERS +++ b/pkg/apis/core/v1/OWNERS @@ -27,7 +27,6 @@ reviewers: - ncdc - tallclair - eparis -- timothysc - piosz - jsafrane - dims diff --git a/pkg/apis/extensions/OWNERS b/pkg/apis/extensions/OWNERS index 82c173c9827..cfac471108c 100755 --- a/pkg/apis/extensions/OWNERS +++ b/pkg/apis/extensions/OWNERS @@ -19,7 +19,6 @@ reviewers: - ncdc - tallclair - mwielgus -- timothysc - soltysh - piosz - dims diff --git a/pkg/apis/rbac/OWNERS b/pkg/apis/rbac/OWNERS index 53710111cde..1aefde049a0 100755 --- a/pkg/apis/rbac/OWNERS +++ b/pkg/apis/rbac/OWNERS @@ -5,7 +5,6 @@ reviewers: - deads2k - sttts - ncdc -- timothysc - dims - krousey - mml diff --git a/staging/src/k8s.io/api/OWNERS b/staging/src/k8s.io/api/OWNERS index 918b10522c3..b2c570008e6 100644 --- a/staging/src/k8s.io/api/OWNERS +++ b/staging/src/k8s.io/api/OWNERS @@ -43,7 +43,6 @@ reviewers: - sttts - tallclair - thockin -- timothysc - vishh - wojtek-t - yifan-gu diff --git a/staging/src/k8s.io/api/apps/OWNERS b/staging/src/k8s.io/api/apps/OWNERS index e06ff4c481e..2f260571a70 100755 --- a/staging/src/k8s.io/api/apps/OWNERS +++ b/staging/src/k8s.io/api/apps/OWNERS @@ -9,7 +9,6 @@ reviewers: - saad-ali - ncdc - tallclair -- timothysc - dims - errordeveloper - mml diff --git a/staging/src/k8s.io/api/authentication/OWNERS b/staging/src/k8s.io/api/authentication/OWNERS index 95235234107..2bdfd0ce5bc 100755 --- a/staging/src/k8s.io/api/authentication/OWNERS +++ b/staging/src/k8s.io/api/authentication/OWNERS @@ -4,7 +4,6 @@ reviewers: - wojtek-t - deads2k - sttts -- timothysc - mbohlool - jianhuiz - enj diff --git a/staging/src/k8s.io/api/authorization/OWNERS b/staging/src/k8s.io/api/authorization/OWNERS index a68d7eef577..c1613fc2e02 100755 --- a/staging/src/k8s.io/api/authorization/OWNERS +++ b/staging/src/k8s.io/api/authorization/OWNERS @@ -9,7 +9,6 @@ reviewers: - erictune - sttts - ncdc -- timothysc - dims - mml - mbohlool diff --git a/staging/src/k8s.io/api/autoscaling/OWNERS b/staging/src/k8s.io/api/autoscaling/OWNERS index 76dffecebdb..4a495ec2532 100755 --- a/staging/src/k8s.io/api/autoscaling/OWNERS +++ b/staging/src/k8s.io/api/autoscaling/OWNERS @@ -8,7 +8,6 @@ reviewers: - erictune - sttts - ncdc -- timothysc - piosz - dims - errordeveloper diff --git a/staging/src/k8s.io/api/batch/OWNERS b/staging/src/k8s.io/api/batch/OWNERS index 502f9077113..38935ff169a 100755 --- a/staging/src/k8s.io/api/batch/OWNERS +++ b/staging/src/k8s.io/api/batch/OWNERS @@ -9,7 +9,6 @@ reviewers: - sttts - saad-ali - ncdc -- timothysc - soltysh - dims - errordeveloper diff --git a/staging/src/k8s.io/api/certificates/OWNERS b/staging/src/k8s.io/api/certificates/OWNERS index 6066d2c1218..1d1ab36e756 100755 --- a/staging/src/k8s.io/api/certificates/OWNERS +++ b/staging/src/k8s.io/api/certificates/OWNERS @@ -6,7 +6,6 @@ reviewers: - caesarxuchao - liggitt - sttts -- timothysc - dims - errordeveloper - mbohlool diff --git a/staging/src/k8s.io/api/extensions/OWNERS b/staging/src/k8s.io/api/extensions/OWNERS index 82c173c9827..cfac471108c 100755 --- a/staging/src/k8s.io/api/extensions/OWNERS +++ 
b/staging/src/k8s.io/api/extensions/OWNERS @@ -19,7 +19,6 @@ reviewers: - ncdc - tallclair - mwielgus -- timothysc - soltysh - piosz - dims diff --git a/staging/src/k8s.io/api/rbac/OWNERS b/staging/src/k8s.io/api/rbac/OWNERS index 53710111cde..1aefde049a0 100755 --- a/staging/src/k8s.io/api/rbac/OWNERS +++ b/staging/src/k8s.io/api/rbac/OWNERS @@ -5,7 +5,6 @@ reviewers: - deads2k - sttts - ncdc -- timothysc - dims - krousey - mml diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS b/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS index af32c1fdf75..dc6a4c72425 100755 --- a/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -16,7 +16,6 @@ reviewers: - janetkuo - tallclair - eparis -- timothysc - dims - hongchaodeng - krousey diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS b/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS index 342ff29145b..c430067f357 100755 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -9,7 +9,6 @@ reviewers: - janetkuo - tallclair - eparis -- timothysc - jbeda - xiang90 - mbohlool diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 7f5eb58602f..cdb125a0dd4 100755 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -16,7 +16,6 @@ reviewers: - janetkuo - justinsb - ncdc -- timothysc - soltysh - dims - madhusudancs diff --git a/staging/src/k8s.io/apiserver/OWNERS b/staging/src/k8s.io/apiserver/OWNERS index 52f250446a3..bc61d5ffaef 100644 --- a/staging/src/k8s.io/apiserver/OWNERS +++ b/staging/src/k8s.io/apiserver/OWNERS @@ -14,5 +14,4 @@ reviewers: - sttts - ncdc - tallclair -- timothysc - enj diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/testing/OWNERS b/staging/src/k8s.io/apiserver/pkg/endpoints/testing/OWNERS index af6baf08b71..8ee0deffdab 100755 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/testing/OWNERS +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/testing/OWNERS @@ -4,7 +4,6 @@ reviewers: - caesarxuchao - liggitt - erictune -- timothysc - soltysh - mml - mbohlool diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS b/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS index 429b15e49b8..75e139342b7 100755 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -18,7 +18,6 @@ reviewers: - roberthbailey - ncdc - eparis -- timothysc - jlowdermilk - piosz - dims diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS b/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS index 9d8627ad124..6427750a9e9 100755 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -14,7 +14,6 @@ reviewers: - roberthbailey - ncdc - eparis -- timothysc - dims - hongchaodeng - krousey From 8f898a376336f626108dfe517cea5bf2ad24a77f Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Wed, 27 Dec 2017 17:16:31 +0530 Subject: [PATCH 40/53] code-generator: add boilerplate header Currently, the boilerplate header from k8s.io/kubernetes is used. If k8s.io/kubernetes is not in the GOPATH, a panic will occur. To fix this, we first calculate the relative path to k8s.io/code-generator and then use the boilerplate from code-generator. 
This avoids hard coding the path and works well for all repos. --- staging/BUILD | 1 + .../k8s.io/code-generator/cmd/client-gen/BUILD | 1 + .../code-generator/cmd/client-gen/main.go | 3 ++- .../code-generator/cmd/conversion-gen/BUILD | 1 + .../code-generator/cmd/conversion-gen/main.go | 3 ++- .../code-generator/cmd/deepcopy-gen/BUILD | 1 + .../code-generator/cmd/deepcopy-gen/main.go | 3 ++- .../code-generator/cmd/defaulter-gen/BUILD | 1 + .../code-generator/cmd/defaulter-gen/main.go | 3 ++- .../cmd/go-to-protobuf/protobuf/BUILD | 1 + .../cmd/go-to-protobuf/protobuf/cmd.go | 3 ++- .../code-generator/cmd/import-boss/BUILD | 1 + .../code-generator/cmd/import-boss/main.go | 3 ++- .../code-generator/cmd/informer-gen/BUILD | 1 + .../code-generator/cmd/informer-gen/main.go | 3 ++- .../k8s.io/code-generator/cmd/lister-gen/BUILD | 1 + .../code-generator/cmd/lister-gen/main.go | 3 ++- .../code-generator/cmd/openapi-gen/BUILD | 1 + .../code-generator/cmd/openapi-gen/main.go | 3 ++- .../k8s.io/code-generator/cmd/set-gen/BUILD | 1 + .../k8s.io/code-generator/cmd/set-gen/main.go | 3 ++- staging/src/k8s.io/code-generator/hack/BUILD | 18 ++++++++++++++++++ .../code-generator/hack/boilerplate.go.txt | 16 ++++++++++++++++ .../k8s.io/code-generator/pkg/util/build.go | 9 +++++++++ 24 files changed, 74 insertions(+), 10 deletions(-) create mode 100644 staging/src/k8s.io/code-generator/hack/BUILD create mode 100644 staging/src/k8s.io/code-generator/hack/boilerplate.go.txt diff --git a/staging/BUILD b/staging/BUILD index 38d8326f2ab..ed1965fc2bb 100644 --- a/staging/BUILD +++ b/staging/BUILD @@ -212,6 +212,7 @@ filegroup( "//staging/src/k8s.io/code-generator/cmd/lister-gen:all-srcs", "//staging/src/k8s.io/code-generator/cmd/openapi-gen:all-srcs", "//staging/src/k8s.io/code-generator/cmd/set-gen:all-srcs", + "//staging/src/k8s.io/code-generator/hack:all-srcs", "//staging/src/k8s.io/code-generator/pkg/util:all-srcs", "//staging/src/k8s.io/code-generator/third_party/forked/golang/reflect:all-srcs", "//staging/src/k8s.io/kube-aggregator:all-srcs", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD index c32c5bf367c..ce49c939efe 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD @@ -21,6 +21,7 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index ca829c30b1b..22c28e35f8f 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -27,6 +27,7 @@ import ( generatorargs "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators" + "k8s.io/code-generator/pkg/util" ) func main() { @@ -34,7 +35,7 @@ func main() { // Override defaults. 
// TODO: move this out of client-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" genericArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD index 2a2572b8a0e..c0fd6862bc9 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD @@ -21,6 +21,7 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/generators:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index afb060762b1..f2b91cc2e29 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -44,6 +44,7 @@ import ( generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" "k8s.io/code-generator/cmd/conversion-gen/generators" + "k8s.io/code-generator/pkg/util" ) func main() { @@ -51,7 +52,7 @@ func main() { // Override defaults. // TODO: move this out of conversion-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD index 8b004d683a3..ea92990c772 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD @@ -20,6 +20,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/deepcopy-gen/args:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:go_default_library", ], diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 5eabfd1b6a3..cce65b772f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -52,6 +52,7 @@ import ( "k8s.io/gengo/examples/deepcopy-gen/generators" generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" + "k8s.io/code-generator/pkg/util" ) func main() { @@ -59,7 +60,7 @@ func main() { // Override defaults. 
// TODO: move this out of deepcopy-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD index 329da12e79c..d1d29fe43cb 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD @@ -20,6 +20,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/defaulter-gen/args:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:go_default_library", ], diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index 4f55680b4f8..9d33f700b33 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -51,6 +51,7 @@ import ( "k8s.io/gengo/examples/defaulter-gen/generators" generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" + "k8s.io/code-generator/pkg/util" ) func main() { @@ -58,7 +59,7 @@ func main() { // Override defaults. // TODO: move this out of defaulter-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD index 17b25ab0066..be8608c8b5d 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD @@ -21,6 +21,7 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/code-generator/third_party/forked/golang/reflect:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index 06b7638db67..5550732259f 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -27,6 +27,7 @@ import ( "path/filepath" "strings" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -55,7 +56,7 @@ func New() *Generator { sourceTree := args.DefaultSourceTree() common := args.GeneratorArgs{ OutputBase: sourceTree, - GoHeaderFilePath: filepath.Join(sourceTree, "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt"), + GoHeaderFilePath: filepath.Join(sourceTree, util.BoilerplatePath()), } defaultProtoImport := filepath.Join(sourceTree, "k8s.io", 
"kubernetes", "vendor", "github.com", "gogo", "protobuf", "protobuf") cwd, err := os.Getwd() diff --git a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD index 189cc09f12c..ea42fe21a38 100644 --- a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD @@ -18,6 +18,7 @@ go_library( importpath = "k8s.io/code-generator/cmd/import-boss", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/import-boss/generators:go_default_library", ], diff --git a/staging/src/k8s.io/code-generator/cmd/import-boss/main.go b/staging/src/k8s.io/code-generator/cmd/import-boss/main.go index a6fad8efe29..d998994415d 100644 --- a/staging/src/k8s.io/code-generator/cmd/import-boss/main.go +++ b/staging/src/k8s.io/code-generator/cmd/import-boss/main.go @@ -59,6 +59,7 @@ import ( "os" "path/filepath" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/examples/import-boss/generators" @@ -69,7 +70,7 @@ func main() { arguments := args.Default() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) arguments.InputDirs = []string{ "k8s.io/kubernetes/pkg/...", "k8s.io/kubernetes/cmd/...", diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD index c5b4f917029..2211cdfda7b 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD @@ -21,6 +21,7 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/generators:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go index e993e620eb5..bfe826080cc 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/informer-gen/generators" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" generatorargs "k8s.io/code-generator/cmd/informer-gen/args" @@ -33,7 +34,7 @@ func main() { // Override defaults. 
// TODO: move out of informer-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD index 1d7e9a65c73..c39a4ef614e 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD @@ -21,6 +21,7 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/lister-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/lister-gen/generators:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go index 6c48240aca2..d5ff8e46ee0 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" generatorargs "k8s.io/code-generator/cmd/lister-gen/args" @@ -33,7 +34,7 @@ func main() { // Override defaults. // TODO: move this out of lister-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" genericArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD index 2a464aae535..339a2cab96b 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD @@ -20,6 +20,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/openapi-gen/args:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/generators:go_default_library", ], diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go index c324c10bab9..fbafc502577 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -29,6 +29,7 @@ import ( "k8s.io/kube-openapi/pkg/generators" generatorargs "k8s.io/code-generator/cmd/openapi-gen/args" + "k8s.io/code-generator/pkg/util" ) func main() { @@ -36,7 +37,7 @@ func main() { // Override defaults. 
// TODO: move this out of openapi-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD index 50c3e45930b..b7eb0835236 100644 --- a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD @@ -22,6 +22,7 @@ go_library( importpath = "k8s.io/code-generator/cmd/set-gen", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/set-gen/generators:go_default_library", ], diff --git a/staging/src/k8s.io/code-generator/cmd/set-gen/main.go b/staging/src/k8s.io/code-generator/cmd/set-gen/main.go index 24af2229f9f..cf8f01d89e5 100644 --- a/staging/src/k8s.io/code-generator/cmd/set-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/set-gen/main.go @@ -28,6 +28,7 @@ import ( "os" "path/filepath" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/examples/set-gen/generators" @@ -38,7 +39,7 @@ func main() { arguments := args.Default() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) arguments.InputDirs = []string{"k8s.io/kubernetes/pkg/util/sets/types"} arguments.OutputPackagePath = "k8s.io/apimachinery/pkg/util/sets" diff --git a/staging/src/k8s.io/code-generator/hack/BUILD b/staging/src/k8s.io/code-generator/hack/BUILD new file mode 100644 index 00000000000..bf12794f661 --- /dev/null +++ b/staging/src/k8s.io/code-generator/hack/BUILD @@ -0,0 +1,18 @@ +exports_files( + glob(["*.txt"]), + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/hack/boilerplate.go.txt b/staging/src/k8s.io/code-generator/hack/boilerplate.go.txt new file mode 100644 index 00000000000..59e740c1ee4 --- /dev/null +++ b/staging/src/k8s.io/code-generator/hack/boilerplate.go.txt @@ -0,0 +1,16 @@ +/* +Copyright YEAR The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + diff --git a/staging/src/k8s.io/code-generator/pkg/util/build.go b/staging/src/k8s.io/code-generator/pkg/util/build.go index 9d3e8a8e151..6ea8f52ee09 100644 --- a/staging/src/k8s.io/code-generator/pkg/util/build.go +++ b/staging/src/k8s.io/code-generator/pkg/util/build.go @@ -18,10 +18,14 @@ package util import ( gobuild "go/build" + "path" "path/filepath" + "reflect" "strings" ) +type empty struct{} + // CurrentPackage returns the go package of the current directory, or "" if it cannot // be derived from the GOPATH. func CurrentPackage() string { @@ -50,3 +54,8 @@ func hasSubdir(root, dir string) (rel string, ok bool) { // cut off root return filepath.ToSlash(dir[len(root):]), true } + +// BoilerplatePath uses the boilerplate in code-generator by calculating the relative path to it. +func BoilerplatePath() string { + return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt") +} From ebad418c297ee72c153d5bbae859ca7e371df73b Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Tue, 13 Feb 2018 12:34:00 +0530 Subject: [PATCH 41/53] add --go-header-file to use kube boilerplate --- hack/update-codegen.sh | 6 ++++-- hack/update-generated-protobuf-dockerized.sh | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 9c29807b438..0d258ff070b 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -72,7 +72,7 @@ INTERNAL_DIRS_CSV=$(IFS=',';echo "${INTERNAL_DIRS[*]// /,}";IFS=$) # This can be called with one flag, --verify-only, so it works for both the # update- and verify- scripts. ${clientgen} --input-base="k8s.io/kubernetes/pkg/apis" --input="${INTERNAL_DIRS_CSV}" "$@" -${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" "$@" +${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt "$@" listergen_internal_apis=( $( @@ -91,7 +91,7 @@ $( ) ) listergen_external_apis_csv=$(IFS=,; echo "${listergen_external_apis[*]}") -${listergen} --output-base "${KUBE_ROOT}/vendor" --output-package "k8s.io/client-go/listers" --input-dirs "${listergen_external_apis_csv}" "$@" +${listergen} --output-base "${KUBE_ROOT}/vendor" --output-package "k8s.io/client-go/listers" --input-dirs "${listergen_external_apis_csv}" --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt "$@" informergen_internal_apis=( $( @@ -105,6 +105,7 @@ ${informergen} \ --input-dirs "${informergen_internal_apis_csv}" \ --internal-clientset-package k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset \ --listers-package k8s.io/kubernetes/pkg/client/listers \ + --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \ "$@" informergen_external_apis=( @@ -124,6 +125,7 @@ ${informergen} \ --input-dirs "${informergen_external_apis_csv}" \ --versioned-clientset-package k8s.io/client-go/kubernetes \ --listers-package k8s.io/client-go/listers \ + --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \ "$@" # You may add additional calls of code generators like set-gen above. 
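The util.BoilerplatePath helper introduced above derives a GOPATH-relative path from the util package's own import path: reflect.TypeOf(empty{}).PkgPath() reports the import path of the package that declares empty, and path.Join cleans the "../.." segments; the generators then join the result onto args.DefaultSourceTree(), as the hunks above show. A minimal standalone sketch of the mechanism (illustrative only, not part of the patch; the example.com/tool import path is hypothetical):

	package main

	import (
		"fmt"
		"path"
		"reflect"
	)

	type empty struct{}

	func main() {
		// PkgPath() reports the import path of the package that declares the
		// type; for a type declared in package main it is simply "main".
		fmt.Println(reflect.TypeOf(empty{}).PkgPath())

		// path.Join cleans the parent-directory segments, so a hypothetical
		// import path "example.com/tool/pkg/util" resolves to the repository's
		// hack/ directory:
		fmt.Println(path.Join("example.com/tool/pkg/util", "/../../hack/boilerplate.go.txt"))
		// prints: example.com/tool/hack/boilerplate.go.txt
	}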
diff --git a/hack/update-generated-protobuf-dockerized.sh b/hack/update-generated-protobuf-dockerized.sh index fda5a1c82aa..cb1839dd23c 100755 --- a/hack/update-generated-protobuf-dockerized.sh +++ b/hack/update-generated-protobuf-dockerized.sh @@ -92,5 +92,6 @@ PATH="${KUBE_ROOT}/_output/bin:${PATH}" \ "${gotoprotobuf}" \ --proto-import="${KUBE_ROOT}/vendor" \ --proto-import="${KUBE_ROOT}/third_party/protobuf" \ - --packages=$(IFS=, ; echo "${PACKAGES[*]}") + --packages=$(IFS=, ; echo "${PACKAGES[*]}") \ + --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \ "$@" From 0cbe0a6034a08975b6b436c7021bf0e879176f97 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Mon, 5 Feb 2018 14:48:30 +0800 Subject: [PATCH 42/53] controller-manager: switch to config/option struct pattern --- .../app/config/config.go | 55 +++++ .../app/controllermanager.go | 161 +++++-------- .../app/options/options.go | 80 +++++-- .../app/options/options_test.go | 18 +- cmd/controller-manager/app/config.go | 60 +++++ .../app/insecure_serving.go | 46 ++++ .../app/options/insecure_serving.go | 112 +++++++++ cmd/controller-manager/app/options/options.go | 214 ++++++++++++++++++ cmd/controller-manager/app/options/utils.go | 139 ------------ cmd/controller-manager/app/serve.go | 51 +++++ .../app/autoscaling.go | 10 +- cmd/kube-controller-manager/app/batch.go | 2 +- .../app/certificates.go | 16 +- .../app/config/config.go | 55 +++++ .../app/controllermanager.go | 189 ++++++---------- cmd/kube-controller-manager/app/core.go | 94 ++++---- cmd/kube-controller-manager/app/extensions.go | 6 +- .../app/options/options.go | 168 ++++++++------ .../app/options/options_test.go | 22 +- .../controller-manager.go | 3 +- cmd/kubeadm/app/preflight/checks.go | 2 +- pkg/master/ports/ports.go | 8 +- pkg/registry/core/rest/storage_core.go | 2 +- test/e2e/apps/daemon_restart.go | 2 +- test/e2e/framework/metrics/metrics_grabber.go | 2 +- test/e2e/framework/util.go | 2 +- test/e2e/network/firewall.go | 2 +- 27 files changed, 977 insertions(+), 544 deletions(-) create mode 100644 cmd/cloud-controller-manager/app/config/config.go create mode 100644 cmd/controller-manager/app/config.go create mode 100644 cmd/controller-manager/app/insecure_serving.go create mode 100644 cmd/controller-manager/app/options/insecure_serving.go create mode 100644 cmd/controller-manager/app/options/options.go delete mode 100644 cmd/controller-manager/app/options/utils.go create mode 100644 cmd/controller-manager/app/serve.go create mode 100644 cmd/kube-controller-manager/app/config/config.go diff --git a/cmd/cloud-controller-manager/app/config/config.go b/cmd/cloud-controller-manager/app/config/config.go new file mode 100644 index 00000000000..ca31d87cd61 --- /dev/null +++ b/cmd/cloud-controller-manager/app/config/config.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package app
+
+import (
+	"time"
+
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
+)
+
+// ExtraConfig is the non-generic part of Config; place custom cloud-controller-manager configuration here.
+type ExtraConfig struct {
+	NodeStatusUpdateFrequency time.Duration
+}
+
+// Config is the main context object for the cloud controller manager.
+type Config struct {
+	Generic genericcontrollermanager.Config
+	Extra   ExtraConfig
+}
+
+type completedConfig struct {
+	Generic genericcontrollermanager.CompletedConfig
+	Extra   *ExtraConfig
+}
+
+// CompletedConfig is the same as Config, just wrapping the private completed object so that it can only be created via Complete.
+type CompletedConfig struct {
+	// Embed a private pointer that cannot be instantiated outside of this package.
+	*completedConfig
+}
+
+// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
+func (c *Config) Complete() *CompletedConfig {
+	cc := completedConfig{
+		c.Generic.Complete(),
+		&c.Extra,
+	}
+
+	return &CompletedConfig{&cc}
+}
diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go
index c43a1d8c562..3ffefbad476 100644
--- a/cmd/cloud-controller-manager/app/controllermanager.go
+++ b/cmd/cloud-controller-manager/app/controllermanager.go
@@ -20,32 +20,24 @@ import (
 	"fmt"
 	"math/rand"
 	"net"
-	"net/http"
-	"net/http/pprof"
 	"os"
-	goruntime "runtime"
-	"strconv"
 	"strings"
 	"time"
 
 	"github.com/golang/glog"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/spf13/cobra"
 
-	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/server/healthz"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/tools/record"
+	cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config"
 	"k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller"
 	cloudcontrollers "k8s.io/kubernetes/pkg/controller/cloud"
@@ -62,7 +54,7 @@ const (
 
 // NewCloudControllerManagerCommand creates a *cobra.Command object with default parameters
 func NewCloudControllerManagerCommand() *cobra.Command {
-	s := options.NewCloudControllerManagerServer()
+	s := options.NewCloudControllerManagerOptions()
 	cmd := &cobra.Command{
 		Use: "cloud-controller-manager",
 		Long: `The Cloud controller manager is a daemon that embeds
@@ -70,7 +62,13 @@ the cloud specific control loops shipped with Kubernetes.`,
 		Run: func(cmd *cobra.Command, args []string) {
 			verflag.PrintAndExitIfRequested()
 
-			if err := Run(s); err != nil {
+			c, err := s.Config()
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
+
+			if err := Run(c.Complete()); err != nil {
 				fmt.Fprintf(os.Stderr, "%v\n", err)
 				os.Exit(1)
 			}
@@ -83,84 +81,68 @@ the cloud specific control loops shipped with Kubernetes.`,
 }
 
 // resyncPeriod computes the time interval a shared informer waits before resyncing with the api server
-func resyncPeriod(s *options.CloudControllerManagerServer) func() time.Duration {
+func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration {
 	return func() time.Duration {
 		factor := rand.Float64() + 1
-		return
time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
+		return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
 	}
 }
 
 // Run runs the ExternalCMServer. This should never exit.
-func Run(s *options.CloudControllerManagerServer) error {
-	if s.CloudProvider == "" {
-		glog.Fatalf("--cloud-provider cannot be empty")
-	}
-
-	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
+func Run(c *cloudcontrollerconfig.CompletedConfig) error {
+	cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider, c.Generic.ComponentConfig.CloudConfigFile)
 	if err != nil {
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)
 	}
-
 	if cloud == nil {
 		glog.Fatalf("cloud provider is nil")
 	}
 
 	if cloud.HasClusterID() == false {
-		if s.AllowUntaggedCloud == true {
+		if c.Generic.ComponentConfig.AllowUntaggedCloud == true {
 			glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
 		} else {
 			glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
 		}
 	}
 
-	if c, err := configz.New("componentconfig"); err == nil {
-		c.Set(s.KubeControllerManagerConfiguration)
+	// setup /configz endpoint
+	if cz, err := configz.New("componentconfig"); err == nil {
+		cz.Set(c.Generic.ComponentConfig)
 	} else {
-		glog.Errorf("unable to register configz: %s", err)
-	}
-	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
-	if err != nil {
-		return err
+		glog.Errorf("unable to register configz: %s", err)
 	}
 
-	// Set the ContentType of the requests from kube client
-	kubeconfig.ContentConfig.ContentType = s.ContentType
-	// Override kubeconfig qps/burst settings from flags
-	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = int(s.KubeAPIBurst)
-	kubeClient, err := kubernetes.NewForConfig(restclient.AddUserAgent(kubeconfig, "cloud-controller-manager"))
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
+	// Start the controller manager HTTP server
+	stopCh := make(chan struct{})
+	if c.Generic.InsecureServing != nil {
+		if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
+			return err
+		}
 	}
-	leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
-
-	// Start the external controller manager server
-	go startHTTP(s)
-
-	recorder := createRecorder(kubeClient)
 
 	run := func(stop <-chan struct{}) {
 		rootClientBuilder := controller.SimpleControllerClientBuilder{
-			ClientConfig: kubeconfig,
+			ClientConfig: c.Generic.Kubeconfig,
 		}
 		var clientBuilder controller.ControllerClientBuilder
-		if s.UseServiceAccountCredentials {
+		if c.Generic.ComponentConfig.UseServiceAccountCredentials {
 			clientBuilder = controller.SAControllerClientBuilder{
-				ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
-				CoreClient:   kubeClient.CoreV1(),
-				AuthenticationClient: kubeClient.AuthenticationV1(),
+				ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
+				CoreClient:   c.Generic.Client.CoreV1(),
+				AuthenticationClient: c.Generic.Client.AuthenticationV1(),
 				Namespace: "kube-system",
 			}
 		} else {
 			clientBuilder = rootClientBuilder
 		}
 
-		if err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder, cloud); err != nil {
+		if err := startControllers(c, c.Generic.Kubeconfig,
rootClientBuilder, clientBuilder, stop, c.Generic.EventRecorder, cloud); err != nil { glog.Fatalf("error running controllers: %v", err) } } - if !s.LeaderElection.LeaderElect { + if !c.Generic.ComponentConfig.LeaderElection.LeaderElect { run(nil) panic("unreachable") } @@ -174,13 +156,13 @@ func Run(s *options.CloudControllerManagerServer) error { id = id + "_" + string(uuid.NewUUID()) // Lock required for leader election - rl, err := resourcelock.New(s.LeaderElection.ResourceLock, + rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock, "kube-system", "cloud-controller-manager", - leaderElectionClient.CoreV1(), + c.Generic.LeaderElectionClient.CoreV1(), resourcelock.ResourceLockConfig{ Identity: id, - EventRecorder: recorder, + EventRecorder: c.Generic.EventRecorder, }) if err != nil { glog.Fatalf("error creating lock: %v", err) @@ -189,9 +171,9 @@ func Run(s *options.CloudControllerManagerServer) error { // Try and become the leader and start cloud controller manager loops leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: s.LeaderElection.LeaseDuration.Duration, - RenewDeadline: s.LeaderElection.RenewDeadline.Duration, - RetryPeriod: s.LeaderElection.RetryPeriod.Duration, + LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration, + RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration, + RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { @@ -202,36 +184,36 @@ func Run(s *options.CloudControllerManagerServer) error { panic("unreachable") } -// StartControllers starts the cloud specific controller loops. -func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error { +// startControllers starts the cloud specific controller loops. 
+func startControllers(c *cloudcontrollerconfig.CompletedConfig, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error { // Function to build the kube client object client := func(serviceAccountName string) kubernetes.Interface { return clientBuilder.ClientOrDie(serviceAccountName) } - if cloud != nil { // Initialize the cloud provider with a reference to the clientBuilder cloud.Initialize(clientBuilder) } + // TODO: move this setup into Config versionedClient := rootClientBuilder.ClientOrDie("shared-informers") - sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(s)()) + sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(c)()) // Start the CloudNodeController nodeController := cloudcontrollers.NewCloudNodeController( sharedInformers.Core().V1().Nodes(), client("cloud-node-controller"), cloud, - s.NodeMonitorPeriod.Duration, - s.NodeStatusUpdateFrequency.Duration) + c.Generic.ComponentConfig.NodeMonitorPeriod.Duration, + c.Extra.NodeStatusUpdateFrequency) nodeController.Run() - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the PersistentVolumeLabelController pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud) threads := 5 go pvlController.Run(threads, stop) - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the service controller serviceController, err := servicecontroller.New( @@ -239,34 +221,34 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc client("service-controller"), sharedInformers.Core().V1().Services(), sharedInformers.Core().V1().Nodes(), - s.ClusterName, + c.Generic.ComponentConfig.ClusterName, ) if err != nil { glog.Errorf("Failed to start service controller: %v", err) } else { - go serviceController.Run(stop, int(s.ConcurrentServiceSyncs)) - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) + go serviceController.Run(stop, int(c.Generic.ComponentConfig.ConcurrentServiceSyncs)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) } // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller - if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes { + if c.Generic.ComponentConfig.AllocateNodeCIDRs && c.Generic.ComponentConfig.ConfigureCloudRoutes { if routes, ok := cloud.Routes(); !ok { glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. 
Will not configure cloud provider routes.") } else { var clusterCIDR *net.IPNet - if len(strings.TrimSpace(s.ClusterCIDR)) != 0 { - _, clusterCIDR, err = net.ParseCIDR(s.ClusterCIDR) + if len(strings.TrimSpace(c.Generic.ComponentConfig.ClusterCIDR)) != 0 { + _, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err) + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.ClusterCIDR, err) } } - routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), s.ClusterName, clusterCIDR) - go routeController.Run(stop, s.RouteReconciliationPeriod.Duration) - time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) + routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.ClusterName, clusterCIDR) + go routeController.Run(stop, c.Generic.ComponentConfig.RouteReconciliationPeriod.Duration) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) } } else { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes) + glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.AllocateNodeCIDRs, c.Generic.ComponentConfig.ConfigureCloudRoutes) } // If apiserver is not running we should wait for some time and fail only then. This is particularly @@ -286,32 +268,3 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc select {} } - -func startHTTP(s *options.CloudControllerManagerServer) { - mux := http.NewServeMux() - healthz.InstallHandler(mux) - if s.EnableProfiling { - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - if s.EnableContentionProfiling { - goruntime.SetBlockProfileRate(1) - } - } - configz.InstallHandler(mux) - mux.Handle("/metrics", prometheus.Handler()) - - server := &http.Server{ - Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))), - Handler: mux, - } - glog.Fatal(server.ListenAndServe()) -} - -func createRecorder(kubeClient *kubernetes.Clientset) record.EventRecorder { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) - return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}) -} diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 5d606b16c14..e783e06ccb0 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -17,10 +17,13 @@ limitations under the License. 
package options

import (
+	"fmt"
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config"
 	cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
 	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
 	"k8s.io/kubernetes/pkg/master/ports"
@@ -31,39 +34,78 @@ import (
 	"github.com/spf13/pflag"
 )

-// CloudControllerManagerServer is the main context object for the controller manager.
-type CloudControllerManagerServer struct {
-	cmoptions.ControllerManagerServer
+// CloudControllerManagerOptions is the main context object for the controller manager.
+type CloudControllerManagerOptions struct {
+	Generic cmoptions.GenericControllerManagerOptions

 	// NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status
 	NodeStatusUpdateFrequency metav1.Duration
 }

-// NewCloudControllerManagerServer creates a new ExternalCMServer with a default config.
-func NewCloudControllerManagerServer() *CloudControllerManagerServer {
-	s := CloudControllerManagerServer{
+// NewCloudControllerManagerOptions creates a new CloudControllerManagerOptions with a default config.
+func NewCloudControllerManagerOptions() *CloudControllerManagerOptions {
+	componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureCloudControllerManagerPort)
+
+	s := CloudControllerManagerOptions{
 		// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
 		// Please make common changes there and put anything cloud specific here.
-		ControllerManagerServer: cmoptions.ControllerManagerServer{
-			KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.CloudControllerManagerPort),
-		},
+		Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig),
 		NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute},
 	}
-	s.LeaderElection.LeaderElect = true
+	s.Generic.ComponentConfig.LeaderElection.LeaderElect = true
+
 	return &s
 }

 // AddFlags adds flags for a specific ExternalCMServer to the specified FlagSet
-func (s *CloudControllerManagerServer) AddFlags(fs *pflag.FlagSet) {
-	cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs)
-	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider of cloud services. Cannot be empty.")
-	fs.DurationVar(&s.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", s.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.")
-	// TODO: remove --service-account-private-key-file 6 months after 1.8 is released (~1.10)
-	fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
-	fs.MarkDeprecated("service-account-private-key-file", "This flag is currently no-op and will be deleted.")
-	fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently.
Larger number = more responsive service management, but more CPU (and network) load")
+func (o *CloudControllerManagerOptions) AddFlags(fs *pflag.FlagSet) {
+	o.Generic.AddFlags(fs)

-	leaderelectionconfig.BindFlags(&s.LeaderElection, fs)
+	fs.StringVar(&o.Generic.ComponentConfig.CloudProvider, "cloud-provider", o.Generic.ComponentConfig.CloudProvider, "The provider of cloud services. Cannot be empty.")
+	fs.DurationVar(&o.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", o.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.")
+	// TODO: remove --service-account-private-key-file 6 months after 1.8 is released (~1.10)
+	fs.StringVar(&o.Generic.ComponentConfig.ServiceAccountKeyFile, "service-account-private-key-file", o.Generic.ComponentConfig.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
+	fs.MarkDeprecated("service-account-private-key-file", "This flag is currently no-op and will be deleted.")
+	fs.Int32Var(&o.Generic.ComponentConfig.ConcurrentServiceSyncs, "concurrent-service-syncs", o.Generic.ComponentConfig.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
+
+	leaderelectionconfig.BindFlags(&o.Generic.ComponentConfig.LeaderElection, fs)

 	utilfeature.DefaultFeatureGate.AddFlag(fs)
 }
+
+// ApplyTo fills in the cloud controller manager config with options.
+func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config) error {
+	if err := o.Generic.ApplyTo(&c.Generic, "cloud-controller-manager"); err != nil {
+		return err
+	}
+
+	c.Extra.NodeStatusUpdateFrequency = o.NodeStatusUpdateFrequency.Duration
+
+	return nil
+}
+
+// Validate is used to validate config before launching the cloud controller manager
+func (o *CloudControllerManagerOptions) Validate() error {
+	errors := []error{}
+	errors = append(errors, o.Generic.Validate()...)
+
+	if len(o.Generic.ComponentConfig.CloudProvider) == 0 {
+		errors = append(errors, fmt.Errorf("--cloud-provider cannot be empty"))
+	}
+
+	return utilerrors.NewAggregate(errors)
+}
+
+// Config returns a cloud controller manager config object
+func (o CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, error) {
+	if err := o.Validate(); err != nil {
+		return nil, err
+	}
+
+	c := &cloudcontrollerconfig.Config{}
+	if err := o.ApplyTo(c); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go
index 57f26c8ca9d..bc5844fb600 100644
--- a/cmd/cloud-controller-manager/app/options/options_test.go
+++ b/cmd/cloud-controller-manager/app/options/options_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package options

 import (
+	"net"
 	"reflect"
 	"testing"
 	"time"
@@ -31,7 +32,7 @@ import (

 func TestAddFlags(t *testing.T) {
 	f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
-	s := NewCloudControllerManagerServer()
+	s := NewCloudControllerManagerOptions()
 	s.AddFlags(f)

 	args := []string{
@@ -65,13 +66,13 @@ func TestAddFlags(t *testing.T) {
 	}
 	f.Parse(args)

-	expected := &CloudControllerManagerServer{
-		ControllerManagerServer: cmoptions.ControllerManagerServer{
-			KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
+	expected := &CloudControllerManagerOptions{
+		Generic: cmoptions.GenericControllerManagerOptions{
+			ComponentConfig: componentconfig.KubeControllerManagerConfiguration{
 				CloudProvider:   "gce",
 				CloudConfigFile: "/cloud-config",
-				Port:            10000,
-				Address:         "192.168.4.10",
+				Port:            10253, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
+				Address:         "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
 				ConcurrentEndpointSyncs: 5,
 				ConcurrentRSSyncs: 5,
 				ConcurrentResourceQuotaSyncs: 5,
@@ -138,6 +139,11 @@ func TestAddFlags(t *testing.T) {
 				CIDRAllocatorType: "RangeAllocator",
 				Controllers:       []string{"*"},
 			},
+			InsecureServing: &cmoptions.InsecureServingOptions{
+				BindAddress: net.ParseIP("192.168.4.10"),
+				BindPort:    int(10000),
+				BindNetwork: "tcp",
+			},
 			Kubeconfig: "/kubeconfig",
 			Master:     "192.168.4.20",
 		},
diff --git a/cmd/controller-manager/app/config.go b/cmd/controller-manager/app/config.go
new file mode 100644
index 00000000000..d97adc127aa
--- /dev/null
+++ b/cmd/controller-manager/app/config.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+)
+
+// Config is the main context object for the controller manager.
+type Config struct {
+	// TODO: split up the component config. This is not generic.
+	ComponentConfig componentconfig.KubeControllerManagerConfiguration
+
+	InsecureServing *InsecureServingInfo
+
+	// the general kube client
+	Client *clientset.Clientset
+
+	// the client only used for leader election
+	LeaderElectionClient *clientset.Clientset
+
+	// the rest config for the master
+	Kubeconfig *restclient.Config
+
+	// the event sink
+	EventRecorder record.EventRecorder
+}
+
+type completedConfig struct {
+	*Config
+}
+
+// CompletedConfig is the same as Config, just wrapping the private completed object so that it can only be created via Complete.
+type CompletedConfig struct {
+	// Embed a private pointer that cannot be instantiated outside of this package.
+	*completedConfig
+}
+
+// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
+func (c *Config) Complete() CompletedConfig {
+	cc := completedConfig{c}
+	return CompletedConfig{&cc}
+}
diff --git a/cmd/controller-manager/app/insecure_serving.go b/cmd/controller-manager/app/insecure_serving.go
new file mode 100644
index 00000000000..d374a92e448
--- /dev/null
+++ b/cmd/controller-manager/app/insecure_serving.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/golang/glog"
+
+	"k8s.io/apiserver/pkg/server"
+)
+
+// InsecureServingInfo is the main context object for the insecure http server.
+type InsecureServingInfo struct {
+	// Listener is the secure server network listener.
+	Listener net.Listener
+}
+
+// Serve starts an insecure http server with the given handler. It fails only if
+// the initial listen call fails. It does not block.
+func (s *InsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {
+	insecureServer := &http.Server{
+		Addr:           s.Listener.Addr().String(),
+		Handler:        handler,
+		MaxHeaderBytes: 1 << 20,
+	}
+
+	glog.Infof("Serving insecurely on %s", s.Listener.Addr())
+	return server.RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh)
+}
diff --git a/cmd/controller-manager/app/options/insecure_serving.go b/cmd/controller-manager/app/options/insecure_serving.go
new file mode 100644
index 00000000000..d93c182c951
--- /dev/null
+++ b/cmd/controller-manager/app/options/insecure_serving.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/spf13/pflag"
+	"k8s.io/apiserver/pkg/server/options"
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+)
+
+// InsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port.
+// No one should be using these anymore.
+type InsecureServingOptions struct {
+	BindAddress net.IP
+	BindPort    int
+	// BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp",
+	// "tcp4", and "tcp6".
+	BindNetwork string
+
+	// Listener is the secure server network listener.
+	// either Listener or BindAddress/BindPort/BindNetwork is set,
+	// if Listener is set, use it and omit BindAddress/BindPort/BindNetwork.
+	Listener net.Listener
+}
+
+// Validate ensures that the insecure port value is within the valid port range.
+func (s *InsecureServingOptions) Validate() []error {
+	errors := []error{}
+
+	if s == nil {
+		return nil
+	}
+
+	if s.BindPort < 0 || s.BindPort > 32767 {
+		errors = append(errors, fmt.Errorf("--insecure-port %v must be between 0 and 32767, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort))
+	}
+
+	return errors
+}
+
+// AddFlags adds flags related to insecure serving for controller manager to the specified FlagSet.
+func (s *InsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+}
+
+// AddDeprecatedFlags adds deprecated flags related to insecure serving for controller manager to the specified FlagSet.
+// TODO: remove these flags once kops stops using `--address`
+func (s *InsecureServingOptions) AddDeprecatedFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+
+	fs.IPVar(&s.BindAddress, "address", s.BindAddress,
+		"DEPRECATED: the IP address on which to listen for the --port port. See --bind-address instead.")
+	// MarkDeprecated hides the flag from the help. We don't want that:
+	// fs.MarkDeprecated("address", "see --bind-address instead.")
+
+	fs.IntVar(&s.BindPort, "port", s.BindPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve HTTP at all. See --secure-port instead.")
+	// MarkDeprecated hides the flag from the help. We don't want that:
+	// fs.MarkDeprecated("port", "see --secure-port instead.")
+}
+
+// ApplyTo adds InsecureServingOptions to the InsecureServingInfo and kube-controller-manager configuration.
+// Note: the double pointer allows setting the *InsecureServingInfo to nil without referencing the struct hosting this pointer.
+func (s *InsecureServingOptions) ApplyTo(c **genericcontrollermanager.InsecureServingInfo, cfg *componentconfig.KubeControllerManagerConfiguration) error {
+	if s == nil {
+		return nil
+	}
+	if s.BindPort <= 0 {
+		return nil
+	}
+
+	if s.Listener == nil {
+		var err error
+		addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort))
+		s.Listener, s.BindPort, err = options.CreateListener(s.BindNetwork, addr)
+		if err != nil {
+			return fmt.Errorf("failed to create listener: %v", err)
+		}
+	}
+
+	*c = &genericcontrollermanager.InsecureServingInfo{
+		Listener: s.Listener,
+	}
+
+	// sync back to component config
+	// TODO: find a more elegant way than syncing back the values.
+	cfg.Port = int32(s.BindPort)
+	cfg.Address = s.BindAddress.String()
+
+	return nil
+}
diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go
new file mode 100644
index 00000000000..5d1834c106c
--- /dev/null
+++ b/cmd/controller-manager/app/options/options.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"net"
+	"time"
+
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/golang/glog"
+
+	"github.com/spf13/pflag"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	clientset "k8s.io/client-go/kubernetes"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/record"
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
+)
+
+// GenericControllerManagerOptions is the common structure for a controller manager. It works with NewGenericControllerManagerOptions
+// and AddFlags to create the common components of kube-controller-manager and cloud-controller-manager.
+type GenericControllerManagerOptions struct {
+	// TODO: turn ComponentConfig into modular option structs. This is not generic.
+	ComponentConfig componentconfig.KubeControllerManagerConfiguration
+
+	InsecureServing *InsecureServingOptions
+	Master          string
+	Kubeconfig      string
+}
+
+const (
+	// These defaults are deprecated and exported so that we can warn if
+	// they are being used.
+
+	// DefaultClusterSigningCertFile is deprecated. Do not use.
+	DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem"
+	// DefaultClusterSigningKeyFile is deprecated. Do not use.
+	DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key"
+)
+
+// NewGenericControllerManagerOptions returns common/default configuration values for both
+// the kube-controller-manager and the cloud-controller-manager. Any common changes should
+// be made here. Any individual changes should be made in that controller.
+func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeControllerManagerConfiguration) GenericControllerManagerOptions {
+	o := GenericControllerManagerOptions{
+		ComponentConfig: componentConfig,
+		InsecureServing: &InsecureServingOptions{
+			BindAddress: net.ParseIP(componentConfig.Address),
+			BindPort:    int(componentConfig.Port),
+			BindNetwork: "tcp",
+		},
+	}
+
+	return o
+}
+
+// NewDefaultControllerManagerComponentConfig returns a default kube-controller-manager configuration object.
+func NewDefaultControllerManagerComponentConfig(insecurePort int32) componentconfig.KubeControllerManagerConfiguration {
+	return componentconfig.KubeControllerManagerConfiguration{
+		Controllers:                      []string{"*"},
+		Port:                             insecurePort,
+		Address:                          "0.0.0.0",
+		ConcurrentEndpointSyncs:          5,
+		ConcurrentServiceSyncs:           1,
+		ConcurrentRCSyncs:                5,
+		ConcurrentRSSyncs:                5,
+		ConcurrentDaemonSetSyncs:         2,
+		ConcurrentJobSyncs:               5,
+		ConcurrentResourceQuotaSyncs:     5,
+		ConcurrentDeploymentSyncs:        5,
+		ConcurrentNamespaceSyncs:         10,
+		ConcurrentSATokenSyncs:           5,
+		RouteReconciliationPeriod:        metav1.Duration{Duration: 10 * time.Second},
+		ResourceQuotaSyncPeriod:          metav1.Duration{Duration: 5 * time.Minute},
+		NamespaceSyncPeriod:              metav1.Duration{Duration: 5 * time.Minute},
+		PVClaimBinderSyncPeriod:          metav1.Duration{Duration: 15 * time.Second},
+		HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
+		HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute},
+		HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute},
+		HorizontalPodAutoscalerTolerance: 0.1,
+		DeploymentControllerSyncPeriod:   metav1.Duration{Duration: 30 * time.Second},
+		MinResyncPeriod:                  metav1.Duration{Duration: 12 * time.Hour},
+		RegisterRetryCount:               10,
+		PodEvictionTimeout:               metav1.Duration{Duration: 5 * time.Minute},
+		NodeMonitorGracePeriod:           metav1.Duration{Duration: 40 * time.Second},
+		NodeStartupGracePeriod:           metav1.Duration{Duration: 60 * time.Second},
+		NodeMonitorPeriod:                metav1.Duration{Duration: 5 * time.Second},
+		ClusterName:                      "kubernetes",
+		NodeCIDRMaskSize:                 24,
+		ConfigureCloudRoutes:             true,
+		TerminatedPodGCThreshold:         12500,
+		VolumeConfiguration: componentconfig.VolumeConfiguration{
+			EnableHostPathProvisioning: false,
+			EnableDynamicProvisioning:  true,
+			PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
+				MaximumRetry:             3,
+				MinimumTimeoutNFS:        300,
+				IncrementTimeoutNFS:      30,
+				MinimumTimeoutHostPath:   60,
+				IncrementTimeoutHostPath: 30,
+			},
+			FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
+		},
+		ContentType:             "application/vnd.kubernetes.protobuf",
+		KubeAPIQPS:              20.0,
+		KubeAPIBurst:            30,
+		LeaderElection:          leaderelectionconfig.DefaultLeaderElectionConfiguration(),
+		ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second},
+		EnableGarbageCollector:  true,
+		ConcurrentGCSyncs:       20,
+		ClusterSigningCertFile:  DefaultClusterSigningCertFile,
+		ClusterSigningKeyFile:   DefaultClusterSigningKeyFile,
+		ClusterSigningDuration:  metav1.Duration{Duration: helpers.OneYear},
+		ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second},
+		EnableTaintManager:       true,
+		HorizontalPodAutoscalerUseRESTClients: true,
+	}
+}
+
+// AddFlags adds common/default flags for both the kube and cloud Controller Manager Server to the
+// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller.
+func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) {
+	fs.BoolVar(&o.ComponentConfig.UseServiceAccountCredentials, "use-service-account-credentials", o.ComponentConfig.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.")
+	fs.StringVar(&o.ComponentConfig.CloudConfigFile, "cloud-config", o.ComponentConfig.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
+	fs.BoolVar(&o.ComponentConfig.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.")
+	fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.")
+	fs.DurationVar(&o.ComponentConfig.RouteReconciliationPeriod.Duration, "route-reconciliation-period", o.ComponentConfig.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.")
+	fs.DurationVar(&o.ComponentConfig.MinResyncPeriod.Duration, "min-resync-period", o.ComponentConfig.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
+	fs.DurationVar(&o.ComponentConfig.NodeMonitorPeriod.Duration, "node-monitor-period", o.ComponentConfig.NodeMonitorPeriod.Duration,
+		"The period for syncing NodeStatus in NodeController.")
+	fs.BoolVar(&o.ComponentConfig.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
+	fs.BoolVar(&o.ComponentConfig.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.")
+	fs.StringVar(&o.ComponentConfig.ClusterName, "cluster-name", o.ComponentConfig.ClusterName, "The instance prefix for the cluster.")
+	fs.StringVar(&o.ComponentConfig.ClusterCIDR, "cluster-cidr", o.ComponentConfig.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true")
+	fs.BoolVar(&o.ComponentConfig.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
+	fs.StringVar(&o.ComponentConfig.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use")
+	fs.BoolVar(&o.ComponentConfig.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
+	fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).")
+	fs.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
+	fs.StringVar(&o.ComponentConfig.ContentType, "kube-api-content-type", o.ComponentConfig.ContentType, "Content type of requests sent to apiserver.")
+	fs.Float32Var(&o.ComponentConfig.KubeAPIQPS, "kube-api-qps", o.ComponentConfig.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.")
+	fs.Int32Var(&o.ComponentConfig.KubeAPIBurst, "kube-api-burst", o.ComponentConfig.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.")
+	fs.DurationVar(&o.ComponentConfig.ControllerStartInterval.Duration, "controller-start-interval", o.ComponentConfig.ControllerStartInterval.Duration, "Interval between starting controller managers.")
+
+	o.InsecureServing.AddFlags(fs)
+	o.InsecureServing.AddDeprecatedFlags(fs)
+}
+
+// ApplyTo fills in the controller manager config with the given options and userAgent
+func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Config, userAgent string) error {
+	c.ComponentConfig = o.ComponentConfig
+
+	if err := o.InsecureServing.ApplyTo(&c.InsecureServing, &c.ComponentConfig); err != nil {
+		return err
+	}
+
+	var err error
+	c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig)
+	if err != nil {
+		return err
+	}
+	c.Kubeconfig.ContentConfig.ContentType = o.ComponentConfig.ContentType
+	c.Kubeconfig.QPS = o.ComponentConfig.KubeAPIQPS
+	c.Kubeconfig.Burst = int(o.ComponentConfig.KubeAPIBurst)
+
+	c.Client, err = clientset.NewForConfig(restclient.AddUserAgent(c.Kubeconfig, userAgent))
+	if err != nil {
+		return err
+	}
+
+	c.LeaderElectionClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(c.Kubeconfig, "leader-election"))
+
+	c.EventRecorder = createRecorder(c.Client, userAgent)
+
+	return nil
+}
+
+// Validate checks GenericControllerManagerOptions and returns a slice of found errors.
+func (o *GenericControllerManagerOptions) Validate() []error {
+	errors := []error{}
+	errors = append(errors, o.InsecureServing.Validate()...)
+
+	// TODO: validate component config, master and kubeconfig
+
+	return errors
+}
+
+func createRecorder(kubeClient *kubernetes.Clientset, userAgent string) record.EventRecorder {
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent})
+}
diff --git a/cmd/controller-manager/app/options/utils.go b/cmd/controller-manager/app/options/utils.go
deleted file mode 100644
index 196e4df2bc7..00000000000
--- a/cmd/controller-manager/app/options/utils.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package options
-
-import (
-	"github.com/cloudflare/cfssl/helpers"
-	"time"
-
-	"github.com/spf13/pflag"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
-	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
-)
-
-// ControllerManagerServer is the common structure for a controller manager. It works with GetDefaultControllerOptions
-// and AddDefaultControllerFlags to create the common components of kube-controller-manager and cloud-controller-manager.
-type ControllerManagerServer struct {
-	componentconfig.KubeControllerManagerConfiguration
-
-	Master     string
-	Kubeconfig string
-}
-
-const (
-	// These defaults are deprecated and exported so that we can warn if
-	// they are being used.
-
-	// DefaultClusterSigningCertFile is deprecated. Do not use.
-	DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem"
-	// DefaultClusterSigningKeyFile is deprecated. Do not use.
-	DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key"
-)
-
-// GetDefaultControllerOptions returns common/default configuration values for both
-// the kube-controller-manager and the cloud-contoller-manager. Any common changes should
-// be made here. Any individual changes should be made in that controller.
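Aside, for illustration only (not part of this patch): the Validate method added above returns a raw []error rather than a single error. A minimal sketch of how a caller might flatten that slice, assuming the existing utilerrors helpers from k8s.io/apimachinery; the wrapper function name is hypothetical:

import utilerrors "k8s.io/apimachinery/pkg/util/errors"

// flattenValidation turns the slice returned by Validate into one error.
// NewAggregate drops nil entries and returns nil for an empty list, so the
// result can be handled like any ordinary error value.
func flattenValidation(o *GenericControllerManagerOptions) error {
	return utilerrors.NewAggregate(o.Validate())
}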
-func GetDefaultControllerOptions(port int32) componentconfig.KubeControllerManagerConfiguration {
-	return componentconfig.KubeControllerManagerConfiguration{
-		Controllers:                      []string{"*"},
-		Port:                             port,
-		Address:                          "0.0.0.0",
-		ConcurrentEndpointSyncs:          5,
-		ConcurrentServiceSyncs:           1,
-		ConcurrentRCSyncs:                5,
-		ConcurrentRSSyncs:                5,
-		ConcurrentDaemonSetSyncs:         2,
-		ConcurrentJobSyncs:               5,
-		ConcurrentResourceQuotaSyncs:     5,
-		ConcurrentDeploymentSyncs:        5,
-		ConcurrentNamespaceSyncs:         10,
-		ConcurrentSATokenSyncs:           5,
-		RouteReconciliationPeriod:        metav1.Duration{Duration: 10 * time.Second},
-		ResourceQuotaSyncPeriod:          metav1.Duration{Duration: 5 * time.Minute},
-		NamespaceSyncPeriod:              metav1.Duration{Duration: 5 * time.Minute},
-		PVClaimBinderSyncPeriod:          metav1.Duration{Duration: 15 * time.Second},
-		HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
-		HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute},
-		HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute},
-		HorizontalPodAutoscalerTolerance: 0.1,
-		DeploymentControllerSyncPeriod:   metav1.Duration{Duration: 30 * time.Second},
-		MinResyncPeriod:                  metav1.Duration{Duration: 12 * time.Hour},
-		RegisterRetryCount:               10,
-		PodEvictionTimeout:               metav1.Duration{Duration: 5 * time.Minute},
-		NodeMonitorGracePeriod:           metav1.Duration{Duration: 40 * time.Second},
-		NodeStartupGracePeriod:           metav1.Duration{Duration: 60 * time.Second},
-		NodeMonitorPeriod:                metav1.Duration{Duration: 5 * time.Second},
-		ClusterName:                      "kubernetes",
-		NodeCIDRMaskSize:                 24,
-		ConfigureCloudRoutes:             true,
-		TerminatedPodGCThreshold:         12500,
-		VolumeConfiguration: componentconfig.VolumeConfiguration{
-			EnableHostPathProvisioning: false,
-			EnableDynamicProvisioning:  true,
-			PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
-				MaximumRetry:             3,
-				MinimumTimeoutNFS:        300,
-				IncrementTimeoutNFS:      30,
-				MinimumTimeoutHostPath:   60,
-				IncrementTimeoutHostPath: 30,
-			},
-			FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
-		},
-		ContentType:             "application/vnd.kubernetes.protobuf",
-		KubeAPIQPS:              20.0,
-		KubeAPIBurst:            30,
-		LeaderElection:          leaderelectionconfig.DefaultLeaderElectionConfiguration(),
-		ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second},
-		EnableGarbageCollector:  true,
-		ConcurrentGCSyncs:       20,
-		ClusterSigningCertFile:  DefaultClusterSigningCertFile,
-		ClusterSigningKeyFile:   DefaultClusterSigningKeyFile,
-		ClusterSigningDuration:  metav1.Duration{Duration: helpers.OneYear},
-		ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second},
-		EnableTaintManager:       true,
-		HorizontalPodAutoscalerUseRESTClients: true,
-	}
-}
-
-// AddDefaultControllerFlags adds common/default flags for both the kube and cloud Controller Manager Server to the
-// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller.
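Aside, for illustration only (not part of this patch): both the deleted helper below and its replacement above register deprecated flags through pflag's MarkDeprecated, which keeps a flag parseable, hides it from --help, and prints the deprecation message whenever the flag is set. A minimal, self-contained sketch:

package main

import "github.com/spf13/pflag"

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Duration("node-sync-period", 0, "no-op, kept for backward compatibility")
	// After MarkDeprecated, setting the flag still works but warns:
	// "Flag --node-sync-period has been deprecated, this flag is a no-op ..."
	fs.MarkDeprecated("node-sync-period", "this flag is a no-op and will be removed")
	fs.Parse([]string{"--node-sync-period=10s"})
}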
-func AddDefaultControllerFlags(s *ControllerManagerServer, fs *pflag.FlagSet) {
-	fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on.")
-	fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces).")
-	fs.BoolVar(&s.UseServiceAccountCredentials, "use-service-account-credentials", s.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.")
-	fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
-	fs.BoolVar(&s.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.")
-	fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.")
-	fs.DurationVar(&s.RouteReconciliationPeriod.Duration, "route-reconciliation-period", s.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.")
-	fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
-	fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration,
-		"The period for syncing NodeStatus in NodeController.")
-	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
-	fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.")
-	fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster.")
-	fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true")
-	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
-	fs.StringVar(&s.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use")
-	fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
-	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).")
-	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
-	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
-	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.")
-	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.")
-	fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.")
-}
diff --git a/cmd/controller-manager/app/serve.go b/cmd/controller-manager/app/serve.go
new file mode 100644
index 00000000000..93c8aad43d1
--- /dev/null
+++ b/cmd/controller-manager/app/serve.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+	"net/http"
+	"net/http/pprof"
+	goruntime "runtime"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/apiserver/pkg/server/healthz"
+	"k8s.io/kubernetes/pkg/util/configz"
+)
+
+type serveFunc func(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error
+
+// Serve creates a base handler chain for a controller manager. It runs
+// the chain with the given serveFunc.
+func Serve(c *CompletedConfig, serveFunc serveFunc, stopCh <-chan struct{}) error {
+	mux := http.NewServeMux()
+	healthz.InstallHandler(mux)
+	if c.ComponentConfig.EnableProfiling {
+		mux.HandleFunc("/debug/pprof/", pprof.Index)
+		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
+		if c.ComponentConfig.EnableContentionProfiling {
+			goruntime.SetBlockProfileRate(1)
+		}
+	}
+	configz.InstallHandler(mux)
+	mux.Handle("/metrics", prometheus.Handler())
+
+	return serveFunc(mux, 0, stopCh)
+}
diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go
index 43c1cd1ab00..1f9532b73a4 100644
--- a/cmd/kube-controller-manager/app/autoscaling.go
+++ b/cmd/kube-controller-manager/app/autoscaling.go
@@ -38,7 +38,7 @@ func startHPAController(ctx ControllerContext) (bool, error) {
 		return false, nil
 	}
-	if ctx.Options.HorizontalPodAutoscalerUseRESTClients {
+	if ctx.ComponentConfig.HorizontalPodAutoscalerUseRESTClients {
 		// use the new-style clients if support for custom metrics is enabled
 		return startHPAControllerWithRESTClient(ctx)
 	}
@@ -88,7 +88,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 	replicaCalc := podautoscaler.NewReplicaCalculator(
 		metricsClient,
 		hpaClient.CoreV1(),
-		ctx.Options.HorizontalPodAutoscalerTolerance,
+		ctx.ComponentConfig.HorizontalPodAutoscalerTolerance,
 	)
 	go podautoscaler.NewHorizontalController(
 		hpaClientGoClient.CoreV1(),
@@ -97,9 +97,9 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 		restMapper,
 		replicaCalc,
 		ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
-		ctx.Options.HorizontalPodAutoscalerSyncPeriod.Duration,
-		ctx.Options.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
-		ctx.Options.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
+		ctx.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration,
+		ctx.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
+		ctx.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
 	).Run(ctx.Stop)
 	return true, nil
 }
diff --git a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go
index b60d7c149e5..31340eadfdf 100644
--- a/cmd/kube-controller-manager/app/batch.go
+++ b/cmd/kube-controller-manager/app/batch.go
@@ -36,7 +36,7 @@ func startJobController(ctx ControllerContext) (bool, error) {
 		ctx.InformerFactory.Core().V1().Pods(),
 		ctx.InformerFactory.Batch().V1().Jobs(),
 		ctx.ClientBuilder.ClientOrDie("job-controller"),
-	).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop)
+	).Run(int(ctx.ComponentConfig.ConcurrentJobSyncs), ctx.Stop)
 	return true, nil
 }
diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go
index 6c1531cae5b..98e2f2ce49e 100644
--- a/cmd/kube-controller-manager/app/certificates.go
+++ b/cmd/kube-controller-manager/app/certificates.go
@@ -37,7 +37,7 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] {
 		return false, nil
 	}
-	if ctx.Options.ClusterSigningCertFile == "" || ctx.Options.ClusterSigningKeyFile == "" {
+	if ctx.ComponentConfig.ClusterSigningCertFile == "" || ctx.ComponentConfig.ClusterSigningKeyFile == "" {
 		return false, nil
 	}
@@ -52,15 +52,15 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 	// bail out of startController without logging.
 	var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool
-	_, err := os.Stat(ctx.Options.ClusterSigningCertFile)
+	_, err := os.Stat(ctx.ComponentConfig.ClusterSigningCertFile)
 	certFileExists = !os.IsNotExist(err)
-	certUsesDefault = (ctx.Options.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)
+	certUsesDefault = (ctx.ComponentConfig.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)
-	_, err = os.Stat(ctx.Options.ClusterSigningKeyFile)
+	_, err = os.Stat(ctx.ComponentConfig.ClusterSigningKeyFile)
 	keyFileExists = !os.IsNotExist(err)
-	keyUsesDefault = (ctx.Options.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)
+	keyUsesDefault = (ctx.ComponentConfig.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)
 	switch {
 	case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault):
@@ -84,9 +84,9 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 	signer, err := signer.NewCSRSigningController(
 		c,
 		ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
-		ctx.Options.ClusterSigningCertFile,
-		ctx.Options.ClusterSigningKeyFile,
-		ctx.Options.ClusterSigningDuration.Duration,
+		ctx.ComponentConfig.ClusterSigningCertFile,
+		ctx.ComponentConfig.ClusterSigningKeyFile,
+		ctx.ComponentConfig.ClusterSigningDuration.Duration,
 	)
 	if err != nil {
 		return false, fmt.Errorf("failed to start certificate controller: %v", err)
diff --git a/cmd/kube-controller-manager/app/config/config.go b/cmd/kube-controller-manager/app/config/config.go
new file mode 100644
index 00000000000..4eb9c3ff812
--- /dev/null
+++ b/cmd/kube-controller-manager/app/config/config.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"time"
+
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
+)
+
+// ExtraConfig is part of Config; custom config specific to this controller manager can be placed here.
+type ExtraConfig struct {
+	NodeStatusUpdateFrequency time.Duration
+}
+
+// Config is the main context object for the controller manager.
+type Config struct {
+	Generic genericcontrollermanager.Config
+	Extra   ExtraConfig
+}
+
+type completedConfig struct {
+	Generic genericcontrollermanager.CompletedConfig
+	Extra   *ExtraConfig
+}
+
+// CompletedConfig is the same as Config, just with the private completed object swapped in.
+type CompletedConfig struct {
+	// Embed a private pointer that cannot be instantiated outside of this package.
+	*completedConfig
+}
+
+// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
+func (c *Config) Complete() *CompletedConfig {
+	cc := completedConfig{
+		c.Generic.Complete(),
+		&c.Extra,
+	}
+
+	return &CompletedConfig{&cc}
+}
diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index dab77f9f809..5b321c422ff 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -24,46 +24,34 @@ import (
 	"fmt"
 	"io/ioutil"
 	"math/rand"
-	"net"
 	"net/http"
-	"net/http/pprof"
 	"os"
-	goruntime "runtime"
-	"strconv"
 	"time"
+	"github.com/golang/glog"
+	"github.com/spf13/cobra"
+
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-
-	"k8s.io/apiserver/pkg/server/healthz"
-
-	"k8s.io/api/core/v1"
 	"k8s.io/client-go/discovery"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/record"
-	certutil "k8s.io/client-go/util/cert"
-
 	"k8s.io/client-go/informers"
-	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	certutil "k8s.io/client-go/util/cert"
+	genericcontrollerconfig "k8s.io/kubernetes/cmd/controller-manager/app"
+	"k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	"k8s.io/kubernetes/pkg/util/configz"
 	"k8s.io/kubernetes/pkg/version"
-
-	"github.com/golang/glog"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/spf13/cobra"
-	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/version/verflag"
 )
@@ -81,7 +69,7 @@ const (
 // NewControllerManagerCommand creates a *cobra.Command object with default parameters
 func NewControllerManagerCommand() *cobra.Command {
-	s := options.NewCMServer()
+	s := options.NewKubeControllerManagerOptions()
 	cmd := &cobra.Command{
 		Use: "kube-controller-manager",
 		Long: `The Kubernetes controller manager is a daemon that embeds
@@ -94,7 +82,17 @@ Kubernetes today are the replication controller, endpoints
controller, namespace controller, and serviceaccounts controller.`,
 		Run: func(cmd *cobra.Command, args []string) {
 			verflag.PrintAndExitIfRequested()
-			Run(s)
+
+			c, err := s.Config(KnownControllers(), ControllersDisabledByDefault.List())
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
+
+			if err := Run(c.Complete()); err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
 		},
 	}
 	s.AddFlags(cmd.Flags(), KnownControllers(), ControllersDisabledByDefault.List())
@@ -105,59 +103,53 @@ controller, and serviceaccounts controller.`,
 // ResyncPeriod returns a function which generates a duration each time it is
 // invoked; this is so that multiple controllers don't get into lock-step and all
 // hammer the apiserver with list requests simultaneously.
-func ResyncPeriod(s *options.CMServer) func() time.Duration {
+func ResyncPeriod(c *config.CompletedConfig) func() time.Duration {
 	return func() time.Duration {
 		factor := rand.Float64() + 1
-		return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
+		return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
 	}
 }

-// Run runs the CMServer. This should never exit.
-func Run(s *options.CMServer) error {
+// Run runs the controller manager built from the given completed config. This should never exit.
+func Run(c *config.CompletedConfig) error {
 	// To help debugging, immediately log version
 	glog.Infof("Version: %+v", version.Get())

-	if err := s.Validate(KnownControllers(), ControllersDisabledByDefault.List()); err != nil {
-		return err
-	}
-
-	if c, err := configz.New("componentconfig"); err == nil {
-		c.Set(s.KubeControllerManagerConfiguration)
+	if cfgz, err := configz.New("componentconfig"); err == nil {
+		cfgz.Set(c.Generic.ComponentConfig)
 	} else {
-		glog.Errorf("unable to register configz: %s", err)
+		glog.Errorf("unable to register configz: %s", err)
 	}

-	kubeClient, leaderElectionClient, kubeconfig, err := createClients(s)
-	if err != nil {
-		return err
+	// Start the controller manager HTTP server
+	stopCh := make(chan struct{})
+	if c.Generic.InsecureServing != nil {
+		if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
+			return err
+		}
 	}

-	if s.Port >= 0 {
-		go startHTTP(s)
-	}
-
-	recorder := createRecorder(kubeClient)
-
 	run := func(stop <-chan struct{}) {
 		rootClientBuilder := controller.SimpleControllerClientBuilder{
-			ClientConfig: kubeconfig,
+			ClientConfig: c.Generic.Kubeconfig,
 		}
 		var clientBuilder controller.ControllerClientBuilder
-		if s.UseServiceAccountCredentials {
-			if len(s.ServiceAccountKeyFile) == 0 {
-				// It's possible another controller process is creating the tokens for us.
+		if c.Generic.ComponentConfig.UseServiceAccountCredentials {
+			if len(c.Generic.ComponentConfig.ServiceAccountKeyFile) == 0 {
+				// It's possible another controller process is creating the tokens for us.
 				// If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
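// Illustrative aside, not part of this patch: the ResyncPeriod helper above
// jitters informer resyncs. factor = rand.Float64() + 1 is uniform in [1, 2),
// so the returned duration is uniform in [MinResyncPeriod, 2*MinResyncPeriod);
// with the 12-hour default, each resync lands somewhere between 12h and 24h,
// which keeps the controllers from re-listing against the apiserver in
// lock-step. Sketch (min stands for the configured MinResyncPeriod):
//
//	factor := rand.Float64() + 1                                  // uniform in [1, 2)
//	period := time.Duration(float64(min.Nanoseconds()) * factor) // uniform in [min, 2*min)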
 				glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
 			}
 			clientBuilder = controller.SAControllerClientBuilder{
-				ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-				CoreClient:           kubeClient.CoreV1(),
-				AuthenticationClient: kubeClient.AuthenticationV1(),
+				ClientConfig:         restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
+				CoreClient:           c.Generic.Client.CoreV1(),
+				AuthenticationClient: c.Generic.Client.AuthenticationV1(),
 				Namespace:            "kube-system",
 			}
 		} else {
 			clientBuilder = rootClientBuilder
 		}
-		ctx, err := CreateControllerContext(s, rootClientBuilder, clientBuilder, stop)
+		ctx, err := CreateControllerContext(c, rootClientBuilder, clientBuilder, stop)
 		if err != nil {
 			glog.Fatalf("error building controller context: %v", err)
 		}
@@ -173,7 +165,7 @@ func Run(s *options.CMServer) error {
 		select {}
 	}

-	if !s.LeaderElection.LeaderElect {
+	if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
 		run(wait.NeverStop)
 		panic("unreachable")
 	}
@@ -182,16 +174,16 @@ func Run(s *options.CMServer) error {
 	if err != nil {
 		return err
 	}
+
 	// add a uniquifier so that two processes on the same host don't accidentally both become active
 	id = id + "_" + string(uuid.NewUUID())
-
-	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+	rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
 		"kube-system",
 		"kube-controller-manager",
-		leaderElectionClient.CoreV1(),
+		c.Generic.LeaderElectionClient.CoreV1(),
 		resourcelock.ResourceLockConfig{
 			Identity:      id,
-			EventRecorder: recorder,
+			EventRecorder: c.Generic.EventRecorder,
 		})
 	if err != nil {
 		glog.Fatalf("error creating lock: %v", err)
@@ -199,9 +191,9 @@ func Run(s *options.CMServer) error {
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
 		Lock:          rl,
-		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
-		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
-		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
+		LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
+		RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
+		RetryPeriod:   c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: run,
 			OnStoppedLeading: func() {
@@ -212,53 +204,6 @@ func Run(s *options.CMServer) error {
 	panic("unreachable")
 }

-func startHTTP(s *options.CMServer) {
-	mux := http.NewServeMux()
-	healthz.InstallHandler(mux)
-	if s.EnableProfiling {
-		mux.HandleFunc("/debug/pprof/", pprof.Index)
-		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
-		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
-		if s.EnableContentionProfiling {
-			goruntime.SetBlockProfileRate(1)
-		}
-	}
-	configz.InstallHandler(mux)
-	mux.Handle("/metrics", prometheus.Handler())
-
-	server := &http.Server{
-		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
-		Handler: mux,
-	}
-	glog.Fatal(server.ListenAndServe())
-}
-
-func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
-	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "controller-manager"})
-}
-
-func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Clientset, *restclient.Config, error) {
-	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	kubeconfig.ContentConfig.ContentType = s.ContentType
-	// Override kubeconfig qps/burst settings from flags
-	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = int(s.KubeAPIBurst)
-	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
-	}
-	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
-	return kubeClient, leaderElectionClient, kubeconfig, nil
-}
-
 type ControllerContext struct {
 	// ClientBuilder will provide a client for this controller to use
 	ClientBuilder controller.ControllerClientBuilder
@@ -267,7 +212,7 @@ type ControllerContext struct {
 	InformerFactory informers.SharedInformerFactory

 	// Options provides access to init options for a given controller
-	Options options.CMServer
+	ComponentConfig componentconfig.KubeControllerManagerConfiguration

 	// AvailableResources is a map listing currently available resources
 	AvailableResources map[schema.GroupVersionResource]bool
@@ -287,10 +232,15 @@ type ControllerContext struct {
 	// InformersStarted is closed after all of the controllers have been initialized and are running. After this point it is safe,
 	// for an individual controller to start the shared informers. Before it is closed, they should not.
 	InformersStarted chan struct{}
+
+	// ResyncPeriod generates a duration each time it is invoked; this is so that
+	// multiple controllers don't get into lock-step and all hammer the apiserver
+	// with list requests simultaneously.
+	ResyncPeriod func() time.Duration
 }

 func (c ControllerContext) IsControllerEnabled(name string) bool {
-	return IsControllerEnabled(name, ControllersDisabledByDefault, c.Options.Controllers...)
+	return IsControllerEnabled(name, ControllersDisabledByDefault, c.ComponentConfig.Controllers...)
 }

 func IsControllerEnabled(name string, disabledByDefaultControllers sets.String, controllers ...string) bool {
@@ -446,7 +396,7 @@ func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma
 // CreateControllerContext creates a context struct containing references to resources needed by the
 // controllers such as the cloud provider and clientBuilder. rootClientBuilder is only used for
 // the shared-informers client and token controller.
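Aside, for illustration only (not part of this patch): IsControllerEnabled above resolves the '*' / 'foo' / '-foo' syntax documented by the --controllers flag later in this patch. A simplified, self-contained sketch of those semantics; the real function takes a sets.String rather than a plain map:

// enabled reports whether a controller should run: an explicit "foo" wins,
// an explicit "-foo" loses, and "*" turns on everything that is not
// disabled by default.
func enabled(name string, disabledByDefault map[string]bool, controllers []string) bool {
	hasStar := false
	for _, c := range controllers {
		switch c {
		case name:
			return true
		case "-" + name:
			return false
		case "*":
			hasStar = true
		}
	}
	return hasStar && !disabledByDefault[name]
}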
-func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
+func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
 	versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
 	sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())
@@ -455,8 +405,8 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild
 		return ControllerContext{}, err
 	}

-	cloud, loopMode, err := createCloudProvider(s.CloudProvider, s.ExternalCloudVolumePlugin,
-		s.CloudConfigFile, s.AllowUntaggedCloud, sharedInformers)
+	cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider, s.Generic.ComponentConfig.ExternalCloudVolumePlugin,
+		s.Generic.ComponentConfig.CloudConfigFile, s.Generic.ComponentConfig.AllowUntaggedCloud, sharedInformers)
 	if err != nil {
 		return ControllerContext{}, err
 	}
@@ -464,12 +414,13 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild
 	ctx := ControllerContext{
 		ClientBuilder:      clientBuilder,
 		InformerFactory:    sharedInformers,
-		Options:            *s,
+		ComponentConfig:    s.Generic.ComponentConfig,
 		AvailableResources: availableResources,
 		Cloud:              cloud,
 		LoopMode:           loopMode,
 		Stop:               stop,
 		InformersStarted:   make(chan struct{}),
+		ResyncPeriod:       ResyncPeriod(s),
 	}
 	return ctx, nil
 }
@@ -493,7 +444,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co
 			continue
 		}

-		time.Sleep(wait.Jitter(ctx.Options.ControllerStartInterval.Duration, ControllerStartJitter))
+		time.Sleep(wait.Jitter(ctx.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
 		glog.V(1).Infof("Starting %q", controllerName)
 		started, err := initFn(ctx)
@@ -524,23 +475,23 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 		return false, nil
 	}

-	if len(ctx.Options.ServiceAccountKeyFile) == 0 {
+	if len(ctx.ComponentConfig.ServiceAccountKeyFile) == 0 {
 		glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
 		return false, nil
 	}
-	privateKey, err := certutil.PrivateKeyFromFile(ctx.Options.ServiceAccountKeyFile)
+	privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.ServiceAccountKeyFile)
 	if err != nil {
 		return true, fmt.Errorf("error reading key for service account token controller: %v", err)
 	}

 	var rootCA []byte
-	if ctx.Options.RootCAFile != "" {
-		rootCA, err = ioutil.ReadFile(ctx.Options.RootCAFile)
+	if ctx.ComponentConfig.RootCAFile != "" {
+		rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.RootCAFile)
 		if err != nil {
-			return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
+			return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
 		}
 		if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
-			return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
+			return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
 		}
 	} else {
 		rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData
@@ -558,7 +509,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 	if err != nil {
 		return true, fmt.Errorf("error creating Tokens controller: %v", err)
 	}
-	go controller.Run(int(ctx.Options.ConcurrentSATokenSyncs), ctx.Stop)
+	go controller.Run(int(ctx.ComponentConfig.ConcurrentSATokenSyncs), ctx.Stop)

 	// start the first set of informers now so that other controllers can start
 	ctx.InformerFactory.Start(ctx.Stop)
diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go
index fdd1e729f3e..d25f7150565 100644
--- a/cmd/kube-controller-manager/app/core.go
+++ b/cmd/kube-controller-manager/app/core.go
@@ -68,33 +68,33 @@ func startServiceController(ctx ControllerContext) (bool, error) {
 		ctx.ClientBuilder.ClientOrDie("service-controller"),
 		ctx.InformerFactory.Core().V1().Services(),
 		ctx.InformerFactory.Core().V1().Nodes(),
-		ctx.Options.ClusterName,
+		ctx.ComponentConfig.ClusterName,
 	)
 	if err != nil {
 		// This error shouldn't fail. It lives like this as a legacy.
 		glog.Errorf("Failed to start service controller: %v", err)
 		return false, nil
 	}
-	go serviceController.Run(ctx.Stop, int(ctx.Options.ConcurrentServiceSyncs))
+	go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ConcurrentServiceSyncs))
 	return true, nil
 }

 func startNodeIpamController(ctx ControllerContext) (bool, error) {
 	var clusterCIDR *net.IPNet = nil
 	var serviceCIDR *net.IPNet = nil
-	if ctx.Options.AllocateNodeCIDRs {
+	if ctx.ComponentConfig.AllocateNodeCIDRs {
 		var err error
-		if len(strings.TrimSpace(ctx.Options.ClusterCIDR)) != 0 {
-			_, clusterCIDR, err = net.ParseCIDR(ctx.Options.ClusterCIDR)
+		if len(strings.TrimSpace(ctx.ComponentConfig.ClusterCIDR)) != 0 {
+			_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
 			if err != nil {
-				glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err)
+				glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
 			}
 		}
-		if len(strings.TrimSpace(ctx.Options.ServiceCIDR)) != 0 {
-			_, serviceCIDR, err = net.ParseCIDR(ctx.Options.ServiceCIDR)
+		if len(strings.TrimSpace(ctx.ComponentConfig.ServiceCIDR)) != 0 {
+			_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ServiceCIDR)
 			if err != nil {
-				glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.Options.ServiceCIDR, err)
+				glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.ServiceCIDR, err)
 			}
 		}
 	}
@@ -105,9 +105,9 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 		ctx.ClientBuilder.ClientOrDie("node-controller"),
 		clusterCIDR,
 		serviceCIDR,
-		int(ctx.Options.NodeCIDRMaskSize),
-		ctx.Options.AllocateNodeCIDRs,
-		ipam.CIDRAllocatorType(ctx.Options.CIDRAllocatorType),
+		int(ctx.ComponentConfig.NodeCIDRMaskSize),
+		ctx.ComponentConfig.AllocateNodeCIDRs,
+		ipam.CIDRAllocatorType(ctx.ComponentConfig.CIDRAllocatorType),
 	)
 	if err != nil {
 		return true, err
@@ -123,15 +123,15 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
 		ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
 		ctx.Cloud,
 		ctx.ClientBuilder.ClientOrDie("node-controller"),
-		ctx.Options.NodeMonitorPeriod.Duration,
-		ctx.Options.NodeStartupGracePeriod.Duration,
-		ctx.Options.NodeMonitorGracePeriod.Duration,
-		ctx.Options.PodEvictionTimeout.Duration,
-		ctx.Options.NodeEvictionRate,
-		ctx.Options.SecondaryNodeEvictionRate,
-		ctx.Options.LargeClusterSizeThreshold,
-		ctx.Options.UnhealthyZoneThreshold,
-		ctx.Options.EnableTaintManager,
+		ctx.ComponentConfig.NodeMonitorPeriod.Duration,
+		ctx.ComponentConfig.NodeStartupGracePeriod.Duration,
+		ctx.ComponentConfig.NodeMonitorGracePeriod.Duration,
+		ctx.ComponentConfig.PodEvictionTimeout.Duration,
+		ctx.ComponentConfig.NodeEvictionRate,
+		ctx.ComponentConfig.SecondaryNodeEvictionRate,
+		ctx.ComponentConfig.LargeClusterSizeThreshold,
+		ctx.ComponentConfig.UnhealthyZoneThreshold,
+		ctx.ComponentConfig.EnableTaintManager,
 		utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions),
 		utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
 	)
@@ -143,8 +143,8 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
 }

 func startRouteController(ctx ControllerContext) (bool, error) {
-	if !ctx.Options.AllocateNodeCIDRs || !ctx.Options.ConfigureCloudRoutes {
-		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.Options.AllocateNodeCIDRs, ctx.Options.ConfigureCloudRoutes)
+	if !ctx.ComponentConfig.AllocateNodeCIDRs || !ctx.ComponentConfig.ConfigureCloudRoutes {
+		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.AllocateNodeCIDRs, ctx.ComponentConfig.ConfigureCloudRoutes)
 		return false, nil
 	}
 	if ctx.Cloud == nil {
@@ -156,27 +156,27 @@ func startRouteController(ctx ControllerContext) (bool, error) {
 		glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 		return false, nil
 	}
-	_, clusterCIDR, err := net.ParseCIDR(ctx.Options.ClusterCIDR)
+	_, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
 	if err != nil {
-		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err)
+		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
 	}
-	routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.Options.ClusterName, clusterCIDR)
-	go routeController.Run(ctx.Stop, ctx.Options.RouteReconciliationPeriod.Duration)
+	routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.ClusterName, clusterCIDR)
+	go routeController.Run(ctx.Stop, ctx.ComponentConfig.RouteReconciliationPeriod.Duration)
 	return true, nil
 }

 func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error) {
 	params := persistentvolumecontroller.ControllerParameters{
 		KubeClient:                ctx.ClientBuilder.ClientOrDie("persistent-volume-binder"),
-		SyncPeriod:                ctx.Options.PVClaimBinderSyncPeriod.Duration,
-		VolumePlugins:             ProbeControllerVolumePlugins(ctx.Cloud, ctx.Options.VolumeConfiguration),
+		SyncPeriod:                ctx.ComponentConfig.PVClaimBinderSyncPeriod.Duration,
+		VolumePlugins:             ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.VolumeConfiguration),
 		Cloud:                     ctx.Cloud,
-		ClusterName:               ctx.Options.ClusterName,
+		ClusterName:               ctx.ComponentConfig.ClusterName,
 		VolumeInformer:            ctx.InformerFactory.Core().V1().PersistentVolumes(),
 		ClaimInformer:             ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		ClassInformer:             ctx.InformerFactory.Storage().V1().StorageClasses(),
 		PodInformer:               ctx.InformerFactory.Core().V1().Pods(),
-		EnableDynamicProvisioning: ctx.Options.VolumeConfiguration.EnableDynamicProvisioning,
+		EnableDynamicProvisioning: ctx.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning,
 	}
 	volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
 	if volumeControllerErr != nil {
@@ -187,7 +187,7 @@ func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error)
 }

 func startAttachDetachController(ctx ControllerContext) (bool, error) {
-	if ctx.Options.ReconcilerSyncLoopPeriod.Duration < time.Second {
+	if ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration < time.Second {
 		return true, fmt.Errorf("Duration time must be greater than one second as set via command line option reconcile-sync-loop-period.")
 	}
 	attachDetachController, attachDetachControllerErr :=
@@ -199,9 +199,9 @@ func startAttachDetachController(ctx ControllerContext) (bool, error) {
 			ctx.InformerFactory.Core().V1().PersistentVolumes(),
 			ctx.Cloud,
 			ProbeAttachableVolumePlugins(),
-			GetDynamicPluginProber(ctx.Options.VolumeConfiguration),
-			ctx.Options.DisableAttachDetachReconcilerSync,
-			ctx.Options.ReconcilerSyncLoopPeriod.Duration,
+			GetDynamicPluginProber(ctx.ComponentConfig.VolumeConfiguration),
+			ctx.ComponentConfig.DisableAttachDetachReconcilerSync,
+			ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration,
 			attachdetach.DefaultTimerConfig,
 		)
 	if attachDetachControllerErr != nil {
@@ -218,7 +218,7 @@ func startVolumeExpandController(ctx ControllerContext) (bool, error) {
 			ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 			ctx.InformerFactory.Core().V1().PersistentVolumes(),
 			ctx.Cloud,
-			ProbeExpandableVolumePlugins(ctx.Options.VolumeConfiguration))
+			ProbeExpandableVolumePlugins(ctx.ComponentConfig.VolumeConfiguration))

 		if expandControllerErr != nil {
 			return true, fmt.Errorf("Failed to start volume expand controller : %v", expandControllerErr)
@@ -235,7 +235,7 @@ func startEndpointController(ctx ControllerContext) (bool, error) {
 		ctx.InformerFactory.Core().V1().Services(),
 		ctx.InformerFactory.Core().V1().Endpoints(),
 		ctx.ClientBuilder.ClientOrDie("endpoint-controller"),
-	).Run(int(ctx.Options.ConcurrentEndpointSyncs), ctx.Stop)
+	).Run(int(ctx.ComponentConfig.ConcurrentEndpointSyncs), ctx.Stop)
 	return true, nil
 }

@@ -245,7 +245,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) {
 		ctx.InformerFactory.Core().V1().ReplicationControllers(),
 		ctx.ClientBuilder.ClientOrDie("replication-controller"),
 		replicationcontroller.BurstReplicas,
-	).Run(int(ctx.Options.ConcurrentRCSyncs), ctx.Stop)
+	).Run(int(ctx.ComponentConfig.ConcurrentRCSyncs), ctx.Stop)
 	return true, nil
 }

@@ -253,7 +253,7 @@ func startPodGCController(ctx ControllerContext) (bool, error) {
 	go podgc.NewPodGC(
 		ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
 		ctx.InformerFactory.Core().V1().Pods(),
-		int(ctx.Options.TerminatedPodGCThreshold),
+		int(ctx.ComponentConfig.TerminatedPodGCThreshold),
 	).Run(ctx.Stop)
 	return true, nil
 }

@@ -267,9 +267,9 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
 	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
 		QuotaClient:               resourceQuotaControllerClient.CoreV1(),
 		ResourceQuotaInformer:     ctx.InformerFactory.Core().V1().ResourceQuotas(),
-		ResyncPeriod:              controller.StaticResyncPeriodFunc(ctx.Options.ResourceQuotaSyncPeriod.Duration),
+		ResyncPeriod:              controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaSyncPeriod.Duration),
 		InformerFactory:           ctx.InformerFactory,
-		ReplenishmentResyncPeriod: ResyncPeriod(&ctx.Options),
+		ReplenishmentResyncPeriod: ctx.ResyncPeriod,
 		DiscoveryFunc:             discoveryFunc,
 		IgnoredResourcesFunc:      quotaConfiguration.IgnoredResources,
 		InformersStarted:          ctx.InformersStarted,
@@ -285,7 +285,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	go resourceQuotaController.Run(int(ctx.Options.ConcurrentResourceQuotaSyncs), ctx.Stop)
+	go resourceQuotaController.Run(int(ctx.ComponentConfig.ConcurrentResourceQuotaSyncs), ctx.Stop)

 	// Periodically the quota controller to detect new resource types
 	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Stop)
@@ -313,10 +313,10 @@ func startNamespaceController(ctx ControllerContext) (bool, error) {
 		namespaceClientPool,
 		discoverResourcesFn,
 		ctx.InformerFactory.Core().V1().Namespaces(),
-		ctx.Options.NamespaceSyncPeriod.Duration,
+		ctx.ComponentConfig.NamespaceSyncPeriod.Duration,
 		v1.FinalizerKubernetes,
 	)
-	go namespaceController.Run(int(ctx.Options.ConcurrentNamespaceSyncs), ctx.Stop)
+	go namespaceController.Run(int(ctx.ComponentConfig.ConcurrentNamespaceSyncs), ctx.Stop)
 	return true, nil
 }

@@ -344,7 +344,7 @@ func startTTLController(ctx ControllerContext) (bool, error) {
 }

 func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
-	if !ctx.Options.EnableGarbageCollector {
+	if !ctx.ComponentConfig.EnableGarbageCollector {
 		return false, nil
 	}

@@ -367,7 +367,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
 	// Get an initial set of deletable resources to prime the garbage collector.
 	deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
 	ignoredResources := make(map[schema.GroupResource]struct{})
-	for _, r := range ctx.Options.GCIgnoredResources {
+	for _, r := range ctx.ComponentConfig.GCIgnoredResources {
 		ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
 	}
 	garbageCollector, err := garbagecollector.NewGarbageCollector(
@@ -384,7 +384,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
 	}

 	// Start the garbage collector.
-	workers := int(ctx.Options.ConcurrentGCSyncs)
+	workers := int(ctx.ComponentConfig.ConcurrentGCSyncs)
 	go garbageCollector.Run(workers, ctx.Stop)

 	// Periodically refresh the RESTMapper with new discovery information and sync
diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go
index 060704bbf53..d369748c6ad 100644
--- a/cmd/kube-controller-manager/app/extensions.go
+++ b/cmd/kube-controller-manager/app/extensions.go
@@ -43,7 +43,7 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) {
 	if err != nil {
 		return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
 	}
-	go dsc.Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop)
+	go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
 	return true, nil
 }

@@ -60,7 +60,7 @@ func startDeploymentController(ctx ControllerContext) (bool, error) {
 	if err != nil {
 		return true, fmt.Errorf("error creating Deployment controller: %v", err)
 	}
-	go dc.Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop)
+	go dc.Run(int(ctx.ComponentConfig.ConcurrentDeploymentSyncs), ctx.Stop)
 	return true, nil
 }

@@ -73,6 +73,6 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
 		ctx.InformerFactory.Core().V1().Pods(),
 		ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
 		replicaset.BurstReplicas,
-	).Run(int(ctx.Options.ConcurrentRSSyncs), ctx.Stop)
+	).Run(int(ctx.ComponentConfig.ConcurrentRSSyncs), ctx.Stop)
 	return true, nil
 }
diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go
index 3e2f0debe4c..572e60c55dc 100644
--- a/cmd/kube-controller-manager/app/options/options.go
+++ b/cmd/kube-controller-manager/app/options/options.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
+	kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
@@ -37,117 +38,124 @@ import (
 	"github.com/spf13/pflag"
 )

-// CMServer is the main context object for the controller manager.
-type CMServer struct {
-	cmoptions.ControllerManagerServer
+// KubeControllerManagerOptions is the main context object for the controller manager.
+type KubeControllerManagerOptions struct {
+	Generic cmoptions.GenericControllerManagerOptions
 }

-// NewCMServer creates a new CMServer with a default config.
-func NewCMServer() *CMServer {
+// NewKubeControllerManagerOptions creates a new KubeControllerManagerOptions with a default config.
+func NewKubeControllerManagerOptions() *KubeControllerManagerOptions {
+	componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureKubeControllerManagerPort)
+	s := KubeControllerManagerOptions{
+		// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
+		// Please make common changes there but put anything kube-controller specific here.
+		Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig),
+	}
+
 	gcIgnoredResources := make([]componentconfig.GroupResource, 0, len(garbagecollector.DefaultIgnoredResources()))
 	for r := range garbagecollector.DefaultIgnoredResources() {
 		gcIgnoredResources = append(gcIgnoredResources, componentconfig.GroupResource{Group: r.Group, Resource: r.Resource})
 	}
+	s.Generic.ComponentConfig.GCIgnoredResources = gcIgnoredResources
+	s.Generic.ComponentConfig.LeaderElection.LeaderElect = true

-	s := CMServer{
-		// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
-		// Please make common changes there but put anything kube-controller specific here.
-		ControllerManagerServer: cmoptions.ControllerManagerServer{
-			KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.ControllerManagerPort),
-		},
-	}
-	s.KubeControllerManagerConfiguration.GCIgnoredResources = gcIgnoredResources
-	s.LeaderElection.LeaderElect = true
 	return &s
 }

-// AddFlags adds flags for a specific CMServer to the specified FlagSet
-func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) {
-	cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs)
+// AddFlags adds flags for a specific KubeControllerManagerOptions to the specified FlagSet
+func (s *KubeControllerManagerOptions) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) {
+	s.Generic.AddFlags(fs)

-	fs.StringSliceVar(&s.Controllers, "controllers", s.Controllers, fmt.Sprintf(""+
+	fs.StringSliceVar(&s.Generic.ComponentConfig.Controllers, "controllers", s.Generic.ComponentConfig.Controllers, fmt.Sprintf(""+
 		"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
 		"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
 		strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
-	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
-	fs.StringVar(&s.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.")
-	fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
+	fs.StringVar(&s.Generic.ComponentConfig.CloudProvider, "cloud-provider", s.Generic.ComponentConfig.CloudProvider, "The provider for cloud services. Empty string for no provider.")
+	fs.StringVar(&s.Generic.ComponentConfig.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.Generic.ComponentConfig.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.Generic.ComponentConfig.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentServiceSyncs, "concurrent-service-syncs", s.Generic.ComponentConfig.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentRCSyncs, "concurrent_rc_syncs", s.Generic.ComponentConfig.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.Generic.ComponentConfig.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")

-	fs.Int32Var(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
-	fs.Int32Var(&s.ConcurrentSATokenSyncs, "concurrent-serviceaccount-token-syncs", s.ConcurrentSATokenSyncs, "The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load")
-	fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", 0, ""+
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.Generic.ComponentConfig.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.Generic.ComponentConfig.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.Generic.ComponentConfig.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
+	fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentSATokenSyncs, "concurrent-serviceaccount-token-syncs", s.Generic.ComponentConfig.ConcurrentSATokenSyncs, "The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load")
+	fs.DurationVar(&s.Generic.ComponentConfig.NodeSyncPeriod.Duration, "node-sync-period", 0, ""+
 		"This flag is deprecated and will be removed in future releases. See node-monitor-period for Node health checking or "+
 		"route-reconciliation-period for cloud provider's route configuration settings.")
 	fs.MarkDeprecated("node-sync-period", "This flag is currently no-op and will be deleted.")
-	fs.DurationVar(&s.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
-	fs.DurationVar(&s.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
-	fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
-	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
-	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
-	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
-	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
-	fs.BoolVar(&s.VolumeConfiguration.EnableDynamicProvisioning, "enable-dynamic-provisioning", s.VolumeConfiguration.EnableDynamicProvisioning, "Enable dynamic provisioning for environments that support it.")
-	fs.StringVar(&s.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
-	fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
-	fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
-	fs.DurationVar(&s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
-	fs.DurationVar(&s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.")
-	fs.Float64Var(&s.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", s.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.")
-	fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
-	fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
-	fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
+	fs.DurationVar(&s.Generic.ComponentConfig.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.Generic.ComponentConfig.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
+	fs.DurationVar(&s.Generic.ComponentConfig.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.Generic.ComponentConfig.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
+	fs.DurationVar(&s.Generic.ComponentConfig.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.Generic.ComponentConfig.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
+	fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
+	fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
+	fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
+	fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.BoolVar(&s.Generic.ComponentConfig.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.Generic.ComponentConfig.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
+	fs.BoolVar(&s.Generic.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning, "enable-dynamic-provisioning", s.Generic.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning, "Enable dynamic provisioning for environments that support it.")
+	fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.Generic.ComponentConfig.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
+	fs.Int32Var(&s.Generic.ComponentConfig.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.Generic.ComponentConfig.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods.
If <= 0, the terminated pod garbage collector is disabled.") + fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.Generic.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.") + fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", s.Generic.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.") + fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", s.Generic.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.") + fs.Float64Var(&s.Generic.ComponentConfig.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", s.Generic.ComponentConfig.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.") + fs.DurationVar(&s.Generic.ComponentConfig.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.Generic.ComponentConfig.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.") + fs.DurationVar(&s.Generic.ComponentConfig.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.Generic.ComponentConfig.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.") + fs.Float32Var(&s.Generic.ComponentConfig.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.") fs.MarkDeprecated("deleting-pods-qps", "This flag is currently no-op and will be deleted.") - fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 0, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.") + fs.Int32Var(&s.Generic.ComponentConfig.DeletingPodsBurst, "deleting-pods-burst", 0, "Number of nodes on which pods are deleted in bursts in case of node failure. For more details look into RateLimiter.") fs.MarkDeprecated("deleting-pods-burst", "This flag is currently no-op and will be deleted.") - fs.Int32Var(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+ + fs.Int32Var(&s.Generic.ComponentConfig.RegisterRetryCount, "register-retry-count", s.Generic.ComponentConfig.RegisterRetryCount, ""+ "The number of retries for initial node registration. Retry interval equals node-sync-period.") fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.") - fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration, + fs.DurationVar(&s.Generic.ComponentConfig.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.Generic.ComponentConfig.NodeMonitorGracePeriod.Duration, "Amount of time which we allow running Node to be unresponsive before marking it unhealthy. 
"+ "Must be N times more than kubelet's nodeStatusUpdateFrequency, "+ "where N means number of retries allowed for kubelet to post node status.") - fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration, + fs.DurationVar(&s.Generic.ComponentConfig.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.Generic.ComponentConfig.NodeStartupGracePeriod.Duration, "Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.") - fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.") - fs.StringVar(&s.ClusterSigningCertFile, "cluster-signing-cert-file", s.ClusterSigningCertFile, "Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates") - fs.StringVar(&s.ClusterSigningKeyFile, "cluster-signing-key-file", s.ClusterSigningKeyFile, "Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates") - fs.DurationVar(&s.ClusterSigningDuration.Duration, "experimental-cluster-signing-duration", s.ClusterSigningDuration.Duration, "The length of duration signed certificates will be given.") + fs.StringVar(&s.Generic.ComponentConfig.ServiceAccountKeyFile, "service-account-private-key-file", s.Generic.ComponentConfig.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.") + fs.StringVar(&s.Generic.ComponentConfig.ClusterSigningCertFile, "cluster-signing-cert-file", s.Generic.ComponentConfig.ClusterSigningCertFile, "Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates") + fs.StringVar(&s.Generic.ComponentConfig.ClusterSigningKeyFile, "cluster-signing-key-file", s.Generic.ComponentConfig.ClusterSigningKeyFile, "Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates") + fs.DurationVar(&s.Generic.ComponentConfig.ClusterSigningDuration.Duration, "experimental-cluster-signing-duration", s.Generic.ComponentConfig.ClusterSigningDuration.Duration, "The length of duration signed certificates will be given.") var dummy string fs.MarkDeprecated("insecure-experimental-approve-all-kubelet-csrs-for-group", "This flag does nothing.") fs.StringVar(&dummy, "insecure-experimental-approve-all-kubelet-csrs-for-group", "", "This flag does nothing.") - fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true") - fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.") - fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.") - fs.BoolVar(&s.EnableGarbageCollector, "enable-garbage-collector", s.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.") - fs.Int32Var(&s.ConcurrentGCSyncs, "concurrent-gc-syncs", s.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.") - fs.Int32Var(&s.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. 
--secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.") - fs.Float32Var(&s.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ") - fs.BoolVar(&s.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.") - fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.") - fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.") - fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.") - fs.Float32Var(&s.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.") - fs.Float32Var(&s.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.") + fs.StringVar(&s.Generic.ComponentConfig.ServiceCIDR, "service-cluster-ip-range", s.Generic.ComponentConfig.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true") + fs.Int32Var(&s.Generic.ComponentConfig.NodeCIDRMaskSize, "node-cidr-mask-size", s.Generic.ComponentConfig.NodeCIDRMaskSize, "Mask size for node cidr in cluster.") + fs.StringVar(&s.Generic.ComponentConfig.RootCAFile, "root-ca-file", s.Generic.ComponentConfig.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.") + fs.BoolVar(&s.Generic.ComponentConfig.EnableGarbageCollector, "enable-garbage-collector", s.Generic.ComponentConfig.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.") + fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentGCSyncs, "concurrent-gc-syncs", s.Generic.ComponentConfig.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.") + fs.Int32Var(&s.Generic.ComponentConfig.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. 
--secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.") + fs.Float32Var(&s.Generic.ComponentConfig.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which need to be not Ready (minimum 3) for the zone to be treated as unhealthy.") + fs.BoolVar(&s.Generic.ComponentConfig.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.") + fs.DurationVar(&s.Generic.ComponentConfig.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.Generic.ComponentConfig.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.") + fs.BoolVar(&s.Generic.ComponentConfig.EnableTaintManager, "enable-taint-manager", s.Generic.ComponentConfig.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.") + fs.BoolVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.Generic.ComponentConfig.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.") + fs.Float32Var(&s.Generic.ComponentConfig.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.") + fs.Float32Var(&s.Generic.ComponentConfig.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.") - leaderelectionconfig.BindFlags(&s.LeaderElection, fs) + leaderelectionconfig.BindFlags(&s.Generic.ComponentConfig.LeaderElection, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) } +// ApplyTo fills up controller manager config with options. +func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config) error { + err := s.Generic.ApplyTo(&c.Generic, "controller-manager") + + return err +} + // Validate is used to validate the options and config before launching the controller manager -func (s *CMServer) Validate(allControllers []string, disabledByDefaultControllers []string) error { +func (s *KubeControllerManagerOptions) Validate(allControllers []string, disabledByDefaultControllers []string) error { var errs []error  allControllersSet := sets.NewString(allControllers...) 
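Taken together, ApplyTo, Validate, and the Config method added just below give the controller manager a conventional options-to-config flow. The following is a minimal sketch of the intended call sequence, based only on the signatures in this patch; the run helper and the controller name lists are placeholders, not part of the change:

    // Hypothetical caller (e.g. a main package): parse flags into the new
    // options struct, then turn it into a validated config.
    s := options.NewKubeControllerManagerOptions()
    s.AddFlags(pflag.CommandLine, allControllers, disabledByDefaultControllers)
    pflag.Parse()

    // Config validates the options and copies them into a
    // kubecontrollerconfig.Config via ApplyTo.
    c, err := s.Config(allControllers, disabledByDefaultControllers)
    if err != nil {
        fmt.Fprintf(os.Stderr, "error: %v\n", err)
        os.Exit(1)
    }
    if err := run(c); err != nil { // run is a placeholder entry point
        os.Exit(1)
    }
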
- for _, controller := range s.Controllers { + for _, controller := range s.Generic.ComponentConfig.Controllers { if controller == "*" { continue } @@ -162,3 +170,17 @@ func (s *CMServer) Validate(allControllers []string, disabledByDefaultController return utilerrors.NewAggregate(errs) } + +// Config returns a controller manager config object. +func (s KubeControllerManagerOptions) Config(allControllers []string, disabledByDefaultControllers []string) (*kubecontrollerconfig.Config, error) { + if err := s.Validate(allControllers, disabledByDefaultControllers); err != nil { + return nil, err + } + + c := &kubecontrollerconfig.Config{} + if err := s.ApplyTo(c); err != nil { + return nil, err + } + + return c, nil +} diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index 94ac9382b9f..e7c246bdbf2 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -17,6 +17,7 @@ limitations under the License. package options import ( + "net" "reflect" "sort" "testing" @@ -32,7 +33,7 @@ import ( func TestAddFlags(t *testing.T) { f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError) - s := NewCMServer() + s := NewKubeControllerManagerOptions() s.AddFlags(f, []string{""}, []string{""}) args := []string{ @@ -107,13 +108,13 @@ func TestAddFlags(t *testing.T) { f.Parse(args) // Sort GCIgnoredResources because it's built from a map, which means the // insertion order is random. - sort.Sort(sortedGCIgnoredResources(s.GCIgnoredResources)) + sort.Sort(sortedGCIgnoredResources(s.Generic.ComponentConfig.GCIgnoredResources)) - expected := &CMServer{ - ControllerManagerServer: cmoptions.ControllerManagerServer{ - KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ - Port: 10000, - Address: "192.168.4.10", + expected := &KubeControllerManagerOptions{ + Generic: cmoptions.GenericControllerManagerOptions{ + ComponentConfig: componentconfig.KubeControllerManagerConfiguration{ + Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config AllocateNodeCIDRs: true, CloudConfigFile: "/cloud-config", CloudProvider: "gce", @@ -204,6 +205,11 @@ func TestAddFlags(t *testing.T) { HorizontalPodAutoscalerUseRESTClients: true, UseServiceAccountCredentials: true, }, + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP("192.168.4.10"), + BindPort: int(10000), + BindNetwork: "tcp", + }, Kubeconfig: "/kubeconfig", Master: "192.168.4.20", }, @@ -211,7 +217,7 @@ func TestAddFlags(t *testing.T) { // Sort GCIgnoredResources because it's built from a map, which means the // insertion order is random. 
- sort.Sort(sortedGCIgnoredResources(expected.GCIgnoredResources)) + sort.Sort(sortedGCIgnoredResources(expected.Generic.ComponentConfig.GCIgnoredResources)) if !reflect.DeepEqual(expected, s) { t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected, s)) diff --git a/cmd/kube-controller-manager/controller-manager.go b/cmd/kube-controller-manager/controller-manager.go index c537a672d4a..b6d1197e8f7 100644 --- a/cmd/kube-controller-manager/controller-manager.go +++ b/cmd/kube-controller-manager/controller-manager.go @@ -22,14 +22,13 @@ package main import ( goflag "flag" + "fmt" "math/rand" "os" "time" "github.com/spf13/pflag" - "fmt" - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/kube-controller-manager/app" diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 88547dde055..8f426cb7344 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -530,7 +530,7 @@ func (eac ExtraArgsCheck) Check() (warnings, errors []error) { } if len(eac.ControllerManagerExtraArgs) > 0 { flags := pflag.NewFlagSet("", pflag.ContinueOnError) - s := cmoptions.NewCMServer() + s := cmoptions.NewKubeControllerManagerOptions() s.AddFlags(flags, []string{}, []string{}) warnings = append(warnings, argsCheck("kube-controller-manager", eac.ControllerManagerExtraArgs, flags)...) } diff --git a/pkg/master/ports/ports.go b/pkg/master/ports/ports.go index 76783de0056..fae2d6225d3 100644 --- a/pkg/master/ports/ports.go +++ b/pkg/master/ports/ports.go @@ -26,12 +26,12 @@ const ( // SchedulerPort is the default port for the scheduler status server. // May be overridden by a flag at startup. SchedulerPort = 10251 - // ControllerManagerPort is the default port for the controller manager status server. + // InsecureKubeControllerManagerPort is the default port for the controller manager status server. // May be overridden by a flag at startup. - ControllerManagerPort = 10252 - // CloudControllerManagerPort is the default port for the cloud controller manager server. + InsecureKubeControllerManagerPort = 10252 + // InsecureCloudControllerManagerPort is the default port for the cloud controller manager server. // This value may be overridden by a flag at startup. - CloudControllerManagerPort = 10253 + InsecureCloudControllerManagerPort = 10253 // KubeletReadOnlyPort exposes basic read-only services from the kubelet. // May be overridden by a flag at startup. 
// This is necessary for heapster to collect monitoring stats from the kubelet diff --git a/pkg/registry/core/rest/storage_core.go b/pkg/registry/core/rest/storage_core.go index 7a316bd500f..96e2219a297 100644 --- a/pkg/registry/core/rest/storage_core.go +++ b/pkg/registry/core/rest/storage_core.go @@ -239,7 +239,7 @@ type componentStatusStorage struct { func (s componentStatusStorage) serversToValidate() map[string]*componentstatus.Server { serversToValidate := map[string]*componentstatus.Server{ - "controller-manager": {Addr: "127.0.0.1", Port: ports.ControllerManagerPort, Path: "/healthz"}, + "controller-manager": {Addr: "127.0.0.1", Port: ports.InsecureKubeControllerManagerPort, Path: "/healthz"}, "scheduler": {Addr: "127.0.0.1", Port: ports.SchedulerPort, Path: "/healthz"}, } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 2319dfe5731..911f8e41d54 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -249,7 +249,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { // Requires master ssh access. framework.SkipUnlessProviderIs("gce", "aws") restarter := NewRestartConfig( - framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout) + framework.GetMasterHost(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout) restarter.restart() // The intent is to ensure the replication controller manager has observed and reported status of diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go index 806ceb55996..dd95dea3eb6 100644 --- a/test/e2e/framework/metrics/metrics_grabber.go +++ b/test/e2e/framework/metrics/metrics_grabber.go @@ -158,7 +158,7 @@ func (g *MetricsGrabber) GrabFromControllerManager() (ControllerManagerMetrics, if !g.registeredMaster { return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. 
Skipping ControllerManager's metrics gathering.") } - output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.ControllerManagerPort) + output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort) if err != nil { return ControllerManagerMetrics{}, err } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index c34d8b43688..dc77883d7de 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3967,7 +3967,7 @@ func RestartControllerManager() error { } func WaitForControllerManagerUp() error { - cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz" + cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index fe701318605..5324a280274 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -170,7 +170,7 @@ var _ = SIGDescribe("Firewall rule", func() { nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP) Expect(len(nodeAddrs)).NotTo(BeZero()) masterAddr := framework.GetMasterAddress(cs) - flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout) + flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) From 4e0114b0dd3701b68c02d038edcf4fbe84515a68 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Wed, 31 Jan 2018 16:17:48 +0100 Subject: [PATCH 43/53] apiserver: make SecureServingOptions and authz/n options re-usable --- cmd/kube-apiserver/app/options/options.go | 2 +- .../app/options/options_test.go | 5 +- cmd/kube-apiserver/app/server.go | 6 +- pkg/kubeapiserver/options/authentication.go | 8 +- pkg/kubeapiserver/options/serving.go | 6 +- pkg/master/master.go | 6 +- .../src/k8s.io/apiserver/pkg/server/config.go | 64 +++++++++------ .../apiserver/pkg/server/genericapiserver.go | 2 +- .../pkg/server/genericapiserver_test.go | 2 +- .../k8s.io/apiserver/pkg/server/options/BUILD | 1 + .../apiserver/pkg/server/options/admission.go | 2 +- .../pkg/server/options/authentication.go | 13 ++- .../pkg/server/options/authorization.go | 2 +- .../pkg/server/options/recommended.go | 8 +- .../apiserver/pkg/server/options/serving.go | 56 +++---------- .../pkg/server/options/serving_test.go | 6 +- .../server/options/serving_with_loopback.go | 79 +++++++++++++++++++ .../src/k8s.io/apiserver/pkg/server/serve.go | 32 ++++---- test/integration/auth/accessreview_test.go | 12 +-- test/integration/auth/auth_test.go | 38 ++++----- test/integration/auth/bootstraptoken_test.go | 2 +- test/integration/auth/node_test.go | 4 +- test/integration/auth/rbac_test.go | 8 +- test/integration/framework/master_utils.go | 14 ++-- .../master/synthetic_master_test.go | 6 +- .../serviceaccount/service_account_test.go | 4 +- 26 files changed, 222 insertions(+), 166 deletions(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go diff --git a/cmd/kube-apiserver/app/options/options.go b/cmd/kube-apiserver/app/options/options.go index 4dbd1370474..1f57add7c3e 100644 --- a/cmd/kube-apiserver/app/options/options.go +++ b/cmd/kube-apiserver/app/options/options.go @@ -42,7 +42,7 @@ import ( type ServerRunOptions struct { GenericServerRunOptions *genericoptions.ServerRunOptions Etcd *genericoptions.EtcdOptions - SecureServing *genericoptions.SecureServingOptions + SecureServing *genericoptions.SecureServingOptionsWithLoopback InsecureServing *kubeoptions.InsecureServingOptions Audit *genericoptions.AuditOptions Features *genericoptions.FeatureOptions diff --git a/cmd/kube-apiserver/app/options/options_test.go b/cmd/kube-apiserver/app/options/options_test.go index 38d95b4c04b..9f97b175608 100644 --- a/cmd/kube-apiserver/app/options/options_test.go +++ b/cmd/kube-apiserver/app/options/options_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/diff" apiserveroptions "k8s.io/apiserver/pkg/server/options" + genericoptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/storage/storagebackend" utilflag "k8s.io/apiserver/pkg/util/flag" auditwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook" @@ -137,14 +138,14 @@ func TestAddFlags(t *testing.T) { EnableWatchCache: true, DefaultWatchCacheSize: 100, }, - SecureServing: &apiserveroptions.SecureServingOptions{ + SecureServing: genericoptions.WithLoopback(&apiserveroptions.SecureServingOptions{ BindAddress: net.ParseIP("192.168.10.20"), BindPort: 6443, ServerCert: apiserveroptions.GeneratableKeyCert{ CertDirectory: "/var/run/kubernetes", PairName: "apiserver", }, - }, + }), InsecureServing: &kubeoptions.InsecureServingOptions{ BindAddress: net.ParseIP("127.0.0.1"), BindPort: 8080, diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 5a3a46afd54..092a77cd7f2 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -450,12 +450,12 @@ func 
BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp ) } - genericConfig.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, storageFactory, client, sharedInformers) + genericConfig.Authentication.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, storageFactory, client, sharedInformers) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("invalid authentication config: %v", err) } - genericConfig.Authorizer, genericConfig.RuleResolver, err = BuildAuthorizer(s, sharedInformers, versionedInformers) + genericConfig.Authorization.Authorizer, genericConfig.RuleResolver, err = BuildAuthorizer(s, sharedInformers, versionedInformers) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("invalid authorization config: %v", err) } @@ -634,7 +634,7 @@ func BuildStorageFactory(s *options.ServerRunOptions, apiResourceConfig *servers func defaultOptions(s *options.ServerRunOptions) error { // set defaults - if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing); err != nil { + if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing.SecureServingOptions); err != nil { return err } if err := kubeoptions.DefaultAdvertiseAddress(s.GenericServerRunOptions, s.InsecureServing); err != nil { diff --git a/pkg/kubeapiserver/options/authentication.go b/pkg/kubeapiserver/options/authentication.go index 6a6b579f215..5896871a673 100644 --- a/pkg/kubeapiserver/options/authentication.go +++ b/pkg/kubeapiserver/options/authentication.go @@ -341,19 +341,17 @@ func (o *BuiltInAuthenticationOptions) ApplyTo(c *genericapiserver.Config) error var err error if o.ClientCert != nil { - c, err = c.ApplyClientCert(o.ClientCert.ClientCA) - if err != nil { + if err = c.Authentication.ApplyClientCert(o.ClientCert.ClientCA, c.SecureServing); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } } if o.RequestHeader != nil { - c, err = c.ApplyClientCert(o.RequestHeader.ClientCAFile) - if err != nil { + if err = c.Authentication.ApplyClientCert(o.RequestHeader.ClientCAFile, c.SecureServing); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } } - c.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0 + c.Authentication.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0 return nil } diff --git a/pkg/kubeapiserver/options/serving.go b/pkg/kubeapiserver/options/serving.go index 2b1ca82d91e..74dd56f41c9 100644 --- a/pkg/kubeapiserver/options/serving.go +++ b/pkg/kubeapiserver/options/serving.go @@ -33,15 +33,15 @@ import ( // NewSecureServingOptions gives default values for the kube-apiserver which are not the options wanted by // "normal" API servers running on the platform -func NewSecureServingOptions() *genericoptions.SecureServingOptions { - return &genericoptions.SecureServingOptions{ +func NewSecureServingOptions() *genericoptions.SecureServingOptionsWithLoopback { + return genericoptions.WithLoopback(&genericoptions.SecureServingOptions{ BindAddress: net.ParseIP("0.0.0.0"), BindPort: 6443, ServerCert: genericoptions.GeneratableKeyCert{ PairName: "apiserver", CertDirectory: "/var/run/kubernetes", }, - } + }) } // DefaultAdvertiseAddress sets the field AdvertiseAddress if diff --git a/pkg/master/master.go b/pkg/master/master.go index eb8569d072d..a8b8a47a219 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -337,15 +337,15 @@ func (c completedConfig) 
New(delegationTarget genericapiserver.DelegationTarget) // TODO: describe the priority all the way down in the RESTStorageProviders and plumb it back through the various discovery // handlers that we have. restStorageProviders := []RESTStorageProvider{ - authenticationrest.RESTStorageProvider{Authenticator: c.GenericConfig.Authenticator}, - authorizationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorizer, RuleResolver: c.GenericConfig.RuleResolver}, + authenticationrest.RESTStorageProvider{Authenticator: c.GenericConfig.Authentication.Authenticator}, + authorizationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, RuleResolver: c.GenericConfig.RuleResolver}, autoscalingrest.RESTStorageProvider{}, batchrest.RESTStorageProvider{}, certificatesrest.RESTStorageProvider{}, extensionsrest.RESTStorageProvider{}, networkingrest.RESTStorageProvider{}, policyrest.RESTStorageProvider{}, - rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorizer}, + rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer}, schedulingrest.RESTStorageProvider{}, settingsrest.RESTStorageProvider{}, storagerest.RESTStorageProvider{}, diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 5ddd2e91122..4d5a5ed422c 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -79,14 +79,19 @@ const ( // Config is a structure used to configure a GenericAPIServer. // Its members are sorted roughly in order of importance for composers. type Config struct { - // SecureServingInfo is required to serve https - SecureServingInfo *SecureServingInfo + // SecureServing is required to serve https + SecureServing *SecureServingInfo + + // Authentication is the configuration for authentication + Authentication AuthenticationInfo + + // Authorization is the configuration for authorization + Authorization AuthorizationInfo // LoopbackClientConfig is a config for a privileged loopback connection to the API server // This is required for proper functioning of the PostStartHooks on a GenericAPIServer + // TODO: move into SecureServing(WithLoopback) as soon as insecure serving is gone LoopbackClientConfig *restclient.Config - // Authenticator determines which subject is making the request - Authenticator authenticator.Request // Authorizer determines whether the subject is allowed to make the request based only // on the RequestURI Authorizer authorizer.Authorizer @@ -116,10 +121,6 @@ type Config struct { AuditBackend audit.Backend // AuditPolicyChecker makes the decision of whether and how to audit log a request. AuditPolicyChecker auditpolicy.Checker - // SupportsBasicAuth indicates that's at least one Authenticator supports basic auth - // If this is true, a basic auth challenge is returned on authentication failure - // TODO(roberthbailey): Remove once the server no longer supports http basic auth. - SupportsBasicAuth bool // ExternalAddress is the host name to use for external (public internet) facing URLs (e.g. Swagger) // Will default to a value based on secure serving info and available ipv4 IPs. 
ExternalAddress string @@ -231,6 +232,21 @@ type SecureServingInfo struct { CipherSuites []uint16 } +type AuthenticationInfo struct { + // Authenticator determines which subject is making the request + Authenticator authenticator.Request + // SupportsBasicAuth indicates that at least one Authenticator supports basic auth + // If this is true, a basic auth challenge is returned on authentication failure + // TODO(roberthbailey): Remove once the server no longer supports http basic auth. + SupportsBasicAuth bool +} + +type AuthorizationInfo struct { + // Authorizer determines whether the subject is allowed to make the request based only + // on the RequestURI + Authorizer authorizer.Authorizer +} + // NewConfig returns a Config struct with the default values func NewConfig(codecs serializer.CodecFactory) *Config { return &Config{ @@ -302,23 +318,23 @@ func DefaultSwaggerConfig() *swagger.Config { } } -func (c *Config) ApplyClientCert(clientCAFile string) (*Config, error) { - if c.SecureServingInfo != nil { +func (c *AuthenticationInfo) ApplyClientCert(clientCAFile string, servingInfo *SecureServingInfo) error { + if servingInfo != nil { if len(clientCAFile) > 0 { clientCAs, err := certutil.CertsFromFile(clientCAFile) if err != nil { - return nil, fmt.Errorf("unable to load client CA file: %v", err) + return fmt.Errorf("unable to load client CA file: %v", err) } - if c.SecureServingInfo.ClientCA == nil { - c.SecureServingInfo.ClientCA = x509.NewCertPool() + if servingInfo.ClientCA == nil { + servingInfo.ClientCA = x509.NewCertPool() } for _, cert := range clientCAs { - c.SecureServingInfo.ClientCA.AddCert(cert) + servingInfo.ClientCA.AddCert(cert) } } } - return c, nil + return nil } type completedConfig struct { @@ -385,7 +401,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo 
DelegationTarget) (*G minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second, ShutdownTimeout: c.RequestTimeout, - SecureServingInfo: c.SecureServingInfo, + SecureServingInfo: c.SecureServing, ExternalAddress: c.ExternalAddress, Handler: apiServerHandler, @@ -530,19 +546,19 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G } func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { - handler := genericapifilters.WithAuthorization(apiHandler, c.RequestContextMapper, c.Authorizer, c.Serializer) + handler := genericapifilters.WithAuthorization(apiHandler, c.RequestContextMapper, c.Authorization.Authorizer, c.Serializer) handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.RequestContextMapper, c.LongRunningFunc) - handler = genericapifilters.WithImpersonation(handler, c.RequestContextMapper, c.Authorizer, c.Serializer) + handler = genericapifilters.WithImpersonation(handler, c.RequestContextMapper, c.Authorization.Authorizer, c.Serializer) if utilfeature.DefaultFeatureGate.Enabled(features.AdvancedAuditing) { handler = genericapifilters.WithAudit(handler, c.RequestContextMapper, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) } else { handler = genericapifilters.WithLegacyAudit(handler, c.RequestContextMapper, c.LegacyAuditWriter) } - failedHandler := genericapifilters.Unauthorized(c.RequestContextMapper, c.Serializer, c.SupportsBasicAuth) + failedHandler := genericapifilters.Unauthorized(c.RequestContextMapper, c.Serializer, c.Authentication.SupportsBasicAuth) if utilfeature.DefaultFeatureGate.Enabled(features.AdvancedAuditing) { failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.RequestContextMapper, c.AuditBackend, c.AuditPolicyChecker) } - handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, c.Authenticator, failedHandler) + handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, c.Authentication.Authenticator, failedHandler) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.RequestContextMapper, c.LongRunningFunc, c.HandlerChainWaitGroup) diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 8235b26540f..38cd2e98179 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -310,7 +310,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { internalStopCh := make(chan struct{}) if s.SecureServingInfo != nil && s.Handler != nil { - if err := s.serveSecurely(internalStopCh); err != nil { + if err := s.SecureServingInfo.Serve(s.Handler, s.ShutdownTimeout, internalStopCh); err != nil { close(internalStopCh) return err } diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go index 0ce483219d8..18761881b37 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go @@ -387,7 +387,7 @@ func TestNotRestRoutesHaveAuth(t *testing.T) { authz := mockAuthorizer{} config.LegacyAPIGroupPrefixes = 
sets.NewString("/apiPrefix") - config.Authorizer = &authz + config.Authorization.Authorizer = &authz config.EnableSwaggerUI = true config.EnableIndex = true diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD index f026cd2d760..ea27ab891fa 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD @@ -15,6 +15,7 @@ go_library( "recommended.go", "server_run_options.go", "serving.go", + "serving_with_loopback.go", ], importpath = "k8s.io/apiserver/pkg/server/options", visibility = ["//visibility:public"], diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index ce5b01e4819..180ba4ffdc3 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -136,7 +136,7 @@ func (a *AdmissionOptions) ApplyTo( if err != nil { return err } - genericInitializer := initializer.New(clientset, informers, c.Authorizer, scheme) + genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, scheme) initializersChain := admission.PluginInitializers{} pluginInitializers = append(pluginInitializers, genericInitializer) initializersChain = append(initializersChain, pluginInitializers...) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index 28c933bea54..c516a6bba42 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -32,6 +32,7 @@ import ( coreclient "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + openapicommon "k8s.io/kube-openapi/pkg/common" ) type RequestHeaderAuthenticationOptions struct { @@ -146,7 +147,7 @@ func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { } -func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.Config) error { +func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { if s == nil { c.Authenticator = nil return nil @@ -156,8 +157,7 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.Config) error { if err != nil { return err } - c, err = c.ApplyClientCert(clientCA.ClientCA) - if err != nil { + if err = c.ApplyClientCert(clientCA.ClientCA, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } @@ -165,8 +165,7 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.Config) error { if err != nil { return err } - c, err = c.ApplyClientCert(requestHeader.ClientCAFile) - if err != nil { + if err = c.ApplyClientCert(requestHeader.ClientCAFile, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } @@ -180,8 +179,8 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.Config) error { } c.Authenticator = authenticator - if c.OpenAPIConfig != nil { - c.OpenAPIConfig.SecurityDefinitions = securityDefinitions + if openAPIConfig != nil { + openAPIConfig.SecurityDefinitions = securityDefinitions } c.SupportsBasicAuth = false diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go index 9a452d11e6b..fb8899bff9b 
100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go @@ -74,7 +74,7 @@ func (s *DelegatingAuthorizationOptions) AddFlags(fs *pflag.FlagSet) { "The duration to cache 'unauthorized' responses from the webhook authorizer.") } -func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.Config) error { +func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.AuthorizationInfo) error { if s == nil { c.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() return nil diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 148bfbdce5a..c2aa6d576a5 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -30,7 +30,7 @@ import ( // Each of them can be nil to leave the feature unconfigured on ApplyTo. type RecommendedOptions struct { Etcd *EtcdOptions - SecureServing *SecureServingOptions + SecureServing *SecureServingOptionsWithLoopback Authentication *DelegatingAuthenticationOptions Authorization *DelegatingAuthorizationOptions Audit *AuditOptions @@ -46,7 +46,7 @@ type RecommendedOptions struct { func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { return &RecommendedOptions{ Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), - SecureServing: NewSecureServingOptions(), + SecureServing: WithLoopback(NewSecureServingOptions()), Authentication: NewDelegatingAuthenticationOptions(), Authorization: NewDelegatingAuthorizationOptions(), Audit: NewAuditOptions(), @@ -78,10 +78,10 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.SecureServing.ApplyTo(&config.Config); err != nil { return err } - if err := o.Authentication.ApplyTo(&config.Config); err != nil { + if err := o.Authentication.ApplyTo(&config.Config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { return err } - if err := o.Authorization.ApplyTo(&config.Config); err != nil { + if err := o.Authorization.ApplyTo(&config.Config.Authorization); err != nil { return err } if err := o.Audit.ApplyTo(&config.Config); err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go index edf2b02f5cf..a1addcbe75f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go @@ -24,7 +24,6 @@ import ( "strconv" "github.com/golang/glog" - "github.com/pborman/uuid" "github.com/spf13/pflag" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -110,9 +109,7 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { } fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+ - "The IP address on which to listen for the --secure-port port. The "+ - "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ - "clients. If blank, all interfaces will be used (0.0.0.0).") + "The IP address on which to listen for the --secure-port port. If blank, all interfaces will be used (0.0.0.0).") fs.IntVar(&s.BindPort, "secure-port", s.BindPort, ""+ "The port on which to serve HTTPS with authentication and authorization. If 0, "+ @@ -156,7 +153,7 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { } // ApplyTo fills up serving information in the server configuration. 
-func (s *SecureServingOptions) ApplyTo(c *server.Config) error { +func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error { if s == nil { return nil } @@ -173,42 +170,10 @@ func (s *SecureServingOptions) ApplyTo(c *server.Config) error { } } - if err := s.applyServingInfoTo(c); err != nil { - return err + *config = &server.SecureServingInfo{ + Listener: s.Listener, } - - c.SecureServingInfo.Listener = s.Listener - - // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and - // let the server return it when the loopback client connects. - certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) - if err != nil { - return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) - } - tlsCert, err := tls.X509KeyPair(certPem, keyPem) - if err != nil { - return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) - } - - secureLoopbackClientConfig, err := c.SecureServingInfo.NewLoopbackClientConfig(uuid.NewRandom().String(), certPem) - switch { - // if we failed and there's no fallback loopback client config, we need to fail - case err != nil && c.LoopbackClientConfig == nil: - return err - - // if we failed, but we already have a fallback loopback client config (usually insecure), allow it - case err != nil && c.LoopbackClientConfig != nil: - - default: - c.LoopbackClientConfig = secureLoopbackClientConfig - c.SecureServingInfo.SNICerts[server.LoopbackClientServerNameOverride] = &tlsCert - } - - return nil -} - -func (s *SecureServingOptions) applyServingInfoTo(c *server.Config) error { - secureServingInfo := &server.SecureServingInfo{} + c := *config serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile // load main cert @@ -217,7 +182,7 @@ func (s *SecureServingOptions) applyServingInfoTo(c *server.Config) error { if err != nil { return fmt.Errorf("unable to load server certificate: %v", err) } - secureServingInfo.Cert = &tlsCert + c.Cert = &tlsCert } if len(s.CipherSuites) != 0 { @@ -225,11 +190,11 @@ func (s *SecureServingOptions) applyServingInfoTo(c *server.Config) error { if err != nil { return err } - secureServingInfo.CipherSuites = cipherSuites + c.CipherSuites = cipherSuites } var err error - secureServingInfo.MinTLSVersion, err = utilflag.TLSVersion(s.MinTLSVersion) + c.MinTLSVersion, err = utilflag.TLSVersion(s.MinTLSVersion) if err != nil { return err } @@ -246,14 +211,11 @@ func (s *SecureServingOptions) applyServingInfoTo(c *server.Config) error { return fmt.Errorf("failed to load SNI cert and key: %v", err) } } - secureServingInfo.SNICerts, err = server.GetNamedCertificateMap(namedTLSCerts) + c.SNICerts, err = server.GetNamedCertificateMap(namedTLSCerts) if err != nil { return err } - c.SecureServingInfo = secureServingInfo - c.ReadWritePort = s.BindPort - return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/serving_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/serving_test.go index 86d38474547..01a443a33de 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/serving_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/serving_test.go @@ -32,6 +32,7 @@ import ( "os" "path/filepath" "reflect" + "strconv" "strings" "testing" "time" @@ -47,7 +48,6 @@ import ( utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/discovery" restclient "k8s.io/client-go/rest" - "strconv" ) func setUp(t *testing.T) Config { @@ 
-471,7 +471,7 @@ NextTest: config.Version = &v config.EnableIndex = true - secureOptions := &SecureServingOptions{ + secureOptions := WithLoopback(&SecureServingOptions{ BindAddress: net.ParseIP("127.0.0.1"), BindPort: 6443, ServerCert: GeneratableKeyCert{ @@ -481,7 +481,7 @@ NextTest: }, }, SNICertKeys: namedCertKeys, - } + }) // use a random free port ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go b/staging/src/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go new file mode 100644 index 00000000000..8d249cb54b4 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "crypto/tls" + "fmt" + + "github.com/pborman/uuid" + + "k8s.io/apiserver/pkg/server" + certutil "k8s.io/client-go/util/cert" +) + +type SecureServingOptionsWithLoopback struct { + *SecureServingOptions +} + +func WithLoopback(o *SecureServingOptions) *SecureServingOptionsWithLoopback { + return &SecureServingOptionsWithLoopback{o} +} + +// ApplyTo fills up serving information in the server configuration. +func (s *SecureServingOptionsWithLoopback) ApplyTo(c *server.Config) error { + if s == nil || s.SecureServingOptions == nil { + return nil + } + + if err := s.SecureServingOptions.ApplyTo(&c.SecureServing); err != nil { + return err + } + + if c.SecureServing == nil { + return nil + } + + c.ReadWritePort = s.BindPort + + // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and + // let the server return it when the loopback client connects. 
+ certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + tlsCert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + + secureLoopbackClientConfig, err := c.SecureServing.NewLoopbackClientConfig(uuid.NewRandom().String(), certPem) + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && c.LoopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && c.LoopbackClientConfig != nil: + + default: + c.LoopbackClientConfig = secureLoopbackClientConfig + c.SecureServing.SNICerts[server.LoopbackClientServerNameOverride] = &tlsCert + } + + return nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/serve.go b/staging/src/k8s.io/apiserver/pkg/server/serve.go index dcb4de1e592..71a3a34793b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/serve.go +++ b/staging/src/k8s.io/apiserver/pkg/server/serve.go @@ -39,17 +39,17 @@ const ( // serveSecurely runs the secure http server. It fails only if certificates cannot // be loaded or the initial listen call fails. The actual server loop (stoppable by closing // stopCh) runs in a go routine, i.e. serveSecurely does not block. -func (s *GenericAPIServer) serveSecurely(stopCh <-chan struct{}) error { - if s.SecureServingInfo.Listener == nil { +func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error { + if s.Listener == nil { return fmt.Errorf("listener must not be nil") } secureServer := &http.Server{ - Addr: s.SecureServingInfo.Listener.Addr().String(), - Handler: s.Handler, + Addr: s.Listener.Addr().String(), + Handler: handler, MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ - NameToCertificate: s.SecureServingInfo.SNICerts, + NameToCertificate: s.SNICerts, // Can't use SSLv3 because of POODLE and BEAST // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher // Can't use TLSv1.1 because of RC4 cipher usage @@ -59,41 +59,41 @@ func (s *GenericAPIServer) serveSecurely(stopCh <-chan struct{}) error { }, } - if s.SecureServingInfo.MinTLSVersion > 0 { - secureServer.TLSConfig.MinVersion = s.SecureServingInfo.MinTLSVersion + if s.MinTLSVersion > 0 { + secureServer.TLSConfig.MinVersion = s.MinTLSVersion } - if len(s.SecureServingInfo.CipherSuites) > 0 { - secureServer.TLSConfig.CipherSuites = s.SecureServingInfo.CipherSuites + if len(s.CipherSuites) > 0 { + secureServer.TLSConfig.CipherSuites = s.CipherSuites } - if s.SecureServingInfo.Cert != nil { - secureServer.TLSConfig.Certificates = []tls.Certificate{*s.SecureServingInfo.Cert} + if s.Cert != nil { + secureServer.TLSConfig.Certificates = []tls.Certificate{*s.Cert} } // append all named certs. Otherwise, the go tls stack will think no SNI processing // is necessary because there is only one cert anyway. // Moreover, if ServerCert.CertFile/ServerCert.KeyFile are not set, the first SNI // cert will become the default cert. That's what we expect anyway. 
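The loopback wrapper above boils down to two standard building blocks: generate a throwaway self-signed serving certificate for a fixed fake SNI name, then hand the certificate PEM to the loopback client as its CA bundle. A condensed sketch of just that part; the SNI name is hardcoded here for illustration, whereas the real override constant lives in the server package:

package main

import (
	"crypto/tls"
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// Assumed name for illustration only.
	const loopbackSNI = "apiserver-loopback-client"

	// Self-signed cert+key for the fake SNI host, valid for nothing else.
	certPEM, keyPEM, err := certutil.GenerateSelfSignedCertKey(loopbackSNI, nil, nil)
	if err != nil {
		panic(err)
	}
	tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		panic(err)
	}
	// The server would register tlsCert under loopbackSNI in its SNI map;
	// the loopback client would carry certPEM as its CA data and
	// loopbackSNI as its TLS ServerName.
	fmt.Println(len(tlsCert.Certificate) > 0)
}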
- for _, c := range s.SecureServingInfo.SNICerts { + for _, c := range s.SNICerts { secureServer.TLSConfig.Certificates = append(secureServer.TLSConfig.Certificates, *c) } - if s.SecureServingInfo.ClientCA != nil { + if s.ClientCA != nil { // Populate PeerCertificates in requests, but don't reject connections without certificates // This allows certificates to be validated by authenticators, while still allowing other auth types secureServer.TLSConfig.ClientAuth = tls.RequestClientCert // Specify allowed CAs for client certificates - secureServer.TLSConfig.ClientCAs = s.SecureServingInfo.ClientCA + secureServer.TLSConfig.ClientCAs = s.ClientCA } glog.Infof("Serving securely on %s", secureServer.Addr) - err := RunServer(secureServer, s.SecureServingInfo.Listener, s.ShutdownTimeout, stopCh) - return err + return RunServer(secureServer, s.Listener, shutdownTimeout, stopCh) } // RunServer listens on the given port if listener is not given, // then spawns a go-routine continuously serving // until the stopCh is closed. This function does not block. +// TODO: make private when insecure serving is gone from the kube-apiserver func RunServer( server *http.Server, ln net.Listener, diff --git a/test/integration/auth/accessreview_test.go b/test/integration/auth/accessreview_test.go index c5e1fbe32d1..f5835aaec3f 100644 --- a/test/integration/auth/accessreview_test.go +++ b/test/integration/auth/accessreview_test.go @@ -55,8 +55,8 @@ func alwaysAlice(req *http.Request) (user.Info, bool, error) { func TestSubjectAccessReview(t *testing.T) { masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice) - masterConfig.GenericConfig.Authorizer = sarAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice) + masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{} masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -147,10 +147,10 @@ func TestSubjectAccessReview(t *testing.T) { func TestSelfSubjectAccessReview(t *testing.T) { username := "alice" masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { + masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { return &user.DefaultInfo{Name: username}, true, nil }) - masterConfig.GenericConfig.Authorizer = sarAuthorizer{} + masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{} masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -229,8 +229,8 @@ func TestSelfSubjectAccessReview(t *testing.T) { func TestLocalSubjectAccessReview(t *testing.T) { masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice) - masterConfig.GenericConfig.Authorizer = sarAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(alwaysAlice) + masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{} masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() diff --git a/test/integration/auth/auth_test.go b/test/integration/auth/auth_test.go 
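Moving serveSecurely off GenericAPIServer and onto SecureServingInfo.Serve makes the serving loop reusable: the caller now supplies the handler, the shutdown timeout, and the stop channel, and the call returns once the serving goroutine is launched. A stripped-down sketch of that contract using only the standard library (plain HTTP here; the real method additionally builds the TLS config):

package main

import (
	"context"
	"net"
	"net/http"
	"time"
)

// serve mirrors the shape of SecureServingInfo.Serve: the listener loop
// runs in a goroutine, so the call itself does not block, and closing
// stopCh triggers a bounded graceful shutdown.
func serve(handler http.Handler, ln net.Listener, shutdownTimeout time.Duration, stopCh <-chan struct{}) {
	srv := &http.Server{Handler: handler, MaxHeaderBytes: 1 << 20}
	go func() {
		<-stopCh
		ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
		defer cancel()
		srv.Shutdown(ctx) // stop accepting, drain in-flight requests
	}()
	go srv.Serve(ln)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	stop := make(chan struct{})
	serve(http.NotFoundHandler(), ln, 5*time.Second, stop)
	close(stop)
	time.Sleep(100 * time.Millisecond) // give the shutdown goroutine a moment
}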
index 41c53e3bf2b..3d2141ce5aa 100644 --- a/test/integration/auth/auth_test.go +++ b/test/integration/auth/auth_test.go @@ -500,7 +500,7 @@ func getPreviousResourceVersionKey(url, id string) string { func TestAuthModeAlwaysDeny(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer() + masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -549,8 +549,8 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{} masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -619,8 +619,8 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { func TestBobIsForbidden(t *testing.T) { // This file has alice and bob in it. masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{} _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -663,8 +663,8 @@ func TestUnknownUserIsUnauthorized(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{} _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -725,8 +725,8 @@ func (impersonateAuthorizer) Authorize(a authorizer.Attributes) (authorizer.Deci func TestImpersonateIsForbidden(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = impersonateAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = impersonateAuthorizer{} _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -872,8 +872,8 @@ func TestAuthorizationAttributeDetermination(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = trackingAuthorizer + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = trackingAuthorizer _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -938,8 +938,8 @@ func TestNamespaceAuthorization(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - 
masterConfig.GenericConfig.Authorizer = a + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = a _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -1036,8 +1036,8 @@ func TestKindAuthorization(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = a + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = a _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -1120,8 +1120,8 @@ func TestReadOnlyAuthorization(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = getTestTokenAuth() - masterConfig.GenericConfig.Authorizer = a + masterConfig.GenericConfig.Authentication.Authenticator = getTestTokenAuth() + masterConfig.GenericConfig.Authorization.Authorizer = a _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() @@ -1179,8 +1179,8 @@ func TestWebhookTokenAuthenticator(t *testing.T) { // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator - masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = authenticator + masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{} _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() diff --git a/test/integration/auth/bootstraptoken_test.go b/test/integration/auth/bootstraptoken_test.go index 43b17c71128..38139adc71e 100644 --- a/test/integration/auth/bootstraptoken_test.go +++ b/test/integration/auth/bootstraptoken_test.go @@ -125,7 +125,7 @@ func TestBootstrapTokenAuth(t *testing.T) { authenticator := bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret})) // Set up a master masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator + masterConfig.GenericConfig.Authentication.Authenticator = authenticator masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit() _, s, closeFn := framework.RunAMaster(masterConfig) defer closeFn() diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 7698ff1be4f..a17e9605072 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -101,8 +101,8 @@ func TestNodeAuthorizer(t *testing.T) { // Start the server masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authenticator = authenticator - masterConfig.GenericConfig.Authorizer = nodeRBACAuthorizer + masterConfig.GenericConfig.Authentication.Authenticator = authenticator + masterConfig.GenericConfig.Authorization.Authorizer = nodeRBACAuthorizer masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h) diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index ea5f9594016..448b51c93a8 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -414,8 +414,8 @@ func TestRBAC(t *testing.T) { for i, tc := range tests { // Create an API Server. 
masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig) - masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{ + masterConfig.GenericConfig.Authorization.Authorizer = newRBACAuthorizer(masterConfig) + masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{ superUser: {Name: "admin", Groups: []string{"system:masters"}}, "any-rolebinding-writer": {Name: "any-rolebinding-writer"}, "any-rolebinding-writer-namespace": {Name: "any-rolebinding-writer-namespace"}, @@ -517,8 +517,8 @@ func TestBootstrapping(t *testing.T) { superUser := "admin/system:masters" masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig) - masterConfig.GenericConfig.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{ + masterConfig.GenericConfig.Authorization.Authorizer = newRBACAuthorizer(masterConfig) + masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{ superUser: {Name: "admin", Groups: []string{"system:masters"}}, })) _, s, closeFn := framework.RunAMaster(masterConfig) diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 807c902c9e9..77f2ff68291 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -160,17 +160,17 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) - if masterConfig.GenericConfig.Authenticator == nil { - masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) + if masterConfig.GenericConfig.Authentication.Authenticator == nil { + masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) } else { - masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator) + masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator) } - if masterConfig.GenericConfig.Authorizer != nil { + if masterConfig.GenericConfig.Authorization.Authorizer != nil { tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) - masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer) + masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer) } else { - masterConfig.GenericConfig.Authorizer = alwaysAllow{} + masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{} } masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken @@ -281,7 +281,7 @@ func NewMasterConfig() *master.Config { genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) kubeVersion := version.Get() genericConfig.Version = &kubeVersion - genericConfig.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() + genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() genericConfig.AdmissionControl = admit.NewAlwaysAdmit() genericConfig.EnableMetrics = 
true diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index aa063be5d2e..90d5350d6f3 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -134,7 +134,7 @@ func TestEmptyList(t *testing.T) { func initStatusForbiddenMasterCongfig() *master.Config { masterConfig := framework.NewIntegrationTestMasterConfig() - masterConfig.GenericConfig.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer() + masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysDenyAuthorizer() return masterConfig } @@ -143,8 +143,8 @@ func initUnauthorizedMasterCongfig() *master.Config { tokenAuthenticator := tokentest.New() tokenAuthenticator.Tokens[AliceToken] = &user.DefaultInfo{Name: "alice", UID: "1"} tokenAuthenticator.Tokens[BobToken] = &user.DefaultInfo{Name: "bob", UID: "2"} - masterConfig.GenericConfig.Authenticator = group.NewGroupAdder(bearertoken.New(tokenAuthenticator), []string{user.AllAuthenticated}) - masterConfig.GenericConfig.Authorizer = allowAliceAuthorizer{} + masterConfig.GenericConfig.Authentication.Authenticator = group.NewGroupAdder(bearertoken.New(tokenAuthenticator), []string{user.AllAuthenticated}) + masterConfig.GenericConfig.Authorization.Authorizer = allowAliceAuthorizer{} return masterConfig } diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index 68655d2c1ec..7bd3bacb236 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -425,8 +425,8 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie masterConfig := framework.NewMasterConfig() masterConfig.GenericConfig.EnableIndex = true - masterConfig.GenericConfig.Authenticator = authenticator - masterConfig.GenericConfig.Authorizer = authorizer + masterConfig.GenericConfig.Authentication.Authenticator = authenticator + masterConfig.GenericConfig.Authorization.Authorizer = authorizer masterConfig.GenericConfig.AdmissionControl = serviceAccountAdmission framework.RunAMasterUsingServer(masterConfig, apiServer, h) From f4564ea0b8c09dd4998193215aaf6628bb880e99 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 8 Feb 2018 19:28:31 +0100 Subject: [PATCH 44/53] controller-manager: add SecureServingOptions --- .../app/controllermanager.go | 5 +++++ .../app/options/options.go | 3 +++ .../app/options/options_test.go | 12 ++++++++++++ cmd/controller-manager/app/config.go | 3 +++ cmd/controller-manager/app/options/options.go | 18 ++++++++++++++++-- .../app/controllermanager.go | 5 +++++ .../app/options/options.go | 3 +++ .../app/options/options_test.go | 12 ++++++++++++ pkg/master/ports/ports.go | 2 +- 9 files changed, 60 insertions(+), 3 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 3ffefbad476..879035c175f 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -115,6 +115,11 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { // Start the controller manager HTTP server stopCh := make(chan struct{}) + if c.Generic.SecureServing != nil { + if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil { + return err + } + } if c.Generic.InsecureServing != nil { if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil { return err diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index e783e06ccb0..530137ff6c2 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -54,6 +54,9 @@ func NewCloudControllerManagerOptions() *CloudControllerManagerOptions { } s.Generic.ComponentConfig.LeaderElection.LeaderElect = true + s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" + s.Generic.SecureServing.ServerCert.PairName = "cloud-controller-manager" + return &s } diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go index bc5844fb600..6a199c01a22 100644 --- a/cmd/cloud-controller-manager/app/options/options_test.go +++ b/cmd/cloud-controller-manager/app/options/options_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" + apiserveroptions "k8s.io/apiserver/pkg/server/options" cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/apis/componentconfig" ) @@ -63,6 +64,9 @@ func TestAddFlags(t *testing.T) { "--route-reconciliation-period=30s", "--min-resync-period=100m", "--use-service-account-credentials=false", + "--cert-dir=/a/b/c", + "--bind-address=192.168.4.21", + "--secure-port=10001", } f.Parse(args) @@ -139,6 +143,14 @@ func TestAddFlags(t *testing.T) { CIDRAllocatorType: "RangeAllocator", Controllers: []string{"*"}, }, + SecureServing: &apiserveroptions.SecureServingOptions{ + BindPort: 10001, + BindAddress: net.ParseIP("192.168.4.21"), + ServerCert: apiserveroptions.GeneratableKeyCert{ + CertDirectory: "/a/b/c", + PairName: "cloud-controller-manager", + }, + }, InsecureServing: &cmoptions.InsecureServingOptions{ BindAddress: net.ParseIP("192.168.4.10"), BindPort: int(10000), diff --git a/cmd/controller-manager/app/config.go b/cmd/controller-manager/app/config.go index d97adc127aa..b62550e3900 100644 --- a/cmd/controller-manager/app/config.go +++ b/cmd/controller-manager/app/config.go @@ -17,6 +17,7 @@ limitations under the License. 
package app import ( + apiserver "k8s.io/apiserver/pkg/server" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -28,6 +29,8 @@ type Config struct { // TODO: split up the component config. This is not generic. ComponentConfig componentconfig.KubeControllerManagerConfiguration + SecureServing *apiserver.SecureServingInfo + // TODO: remove deprecated insecure serving InsecureServing *InsecureServingInfo // the general kube client diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index 5d1834c106c..82393c7ba6a 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/pflag" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiserveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" @@ -44,9 +45,12 @@ type GenericControllerManagerOptions struct { // TODO: turn ComponentConfig into modular option structs. This is not generic. ComponentConfig componentconfig.KubeControllerManagerConfiguration + SecureServing *apiserveroptions.SecureServingOptions + // TODO: remove insecure serving mode InsecureServing *InsecureServingOptions - Master string - Kubeconfig string + + Master string + Kubeconfig string } const ( @@ -65,6 +69,7 @@ const ( func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeControllerManagerConfiguration) GenericControllerManagerOptions { o := GenericControllerManagerOptions{ ComponentConfig: componentConfig, + SecureServing: apiserveroptions.NewSecureServingOptions(), InsecureServing: &InsecureServingOptions{ BindAddress: net.ParseIP(componentConfig.Address), BindPort: int(componentConfig.Port), @@ -72,6 +77,10 @@ func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeCont }, } + // disable secure serving for now + // TODO: enable HTTPS by default + o.SecureServing.BindPort = 0 + return o } @@ -163,6 +172,7 @@ func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) { fs.Int32Var(&o.ComponentConfig.KubeAPIBurst, "kube-api-burst", o.ComponentConfig.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.") fs.DurationVar(&o.ComponentConfig.ControllerStartInterval.Duration, "controller-start-interval", o.ComponentConfig.ControllerStartInterval.Duration, "Interval between starting controller managers.") + o.SecureServing.AddFlags(fs) o.InsecureServing.AddFlags(fs) o.InsecureServing.AddDeprecatedFlags(fs) } @@ -171,6 +181,9 @@ func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) { func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Config, userAgent string) error { c.ComponentConfig = o.ComponentConfig + if err := o.SecureServing.ApplyTo(&c.SecureServing); err != nil { + return err + } if err := o.InsecureServing.ApplyTo(&c.InsecureServing, &c.ComponentConfig); err != nil { return err } @@ -199,6 +212,7 @@ func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Co // Validate checks GenericControllerManagerOptions and return a slice of found errors. func (o *GenericControllerManagerOptions) Validate() []error { errors := []error{} + errors = append(errors, o.SecureServing.Validate()...) errors = append(errors, o.InsecureServing.Validate()...) 
// TODO: validate component config, master and kubeconfig diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 5b321c422ff..5bc5260f0c4 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -123,6 +123,11 @@ func Run(c *config.CompletedConfig) error { // Start the controller manager HTTP server stopCh := make(chan struct{}) + if c.Generic.SecureServing != nil { + if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil { + return err + } + } if c.Generic.InsecureServing != nil { if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil { return err diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index 572e60c55dc..f9adc670c6d 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -52,6 +52,9 @@ func NewKubeControllerManagerOptions() *KubeControllerManagerOptions { Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig), } + s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" + s.Generic.SecureServing.ServerCert.PairName = "kube-controller-manager" + gcIgnoredResources := make([]componentconfig.GroupResource, 0, len(garbagecollector.DefaultIgnoredResources())) for r := range garbagecollector.DefaultIgnoredResources() { gcIgnoredResources = append(gcIgnoredResources, componentconfig.GroupResource{Group: r.Group, Resource: r.Resource}) diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index e7c246bdbf2..997b25646d7 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" + apiserveroptions "k8s.io/apiserver/pkg/server/options" cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/apis/componentconfig" ) @@ -104,6 +105,9 @@ func TestAddFlags(t *testing.T) { "--terminated-pod-gc-threshold=12000", "--unhealthy-zone-threshold=0.6", "--use-service-account-credentials=true", + "--cert-dir=/a/b/c", + "--bind-address=192.168.4.21", + "--secure-port=10001", } f.Parse(args) // Sort GCIgnoredResources because it's built from a map, which means the @@ -205,6 +209,14 @@ func TestAddFlags(t *testing.T) { HorizontalPodAutoscalerUseRESTClients: true, UseServiceAccountCredentials: true, }, + SecureServing: &apiserveroptions.SecureServingOptions{ + BindPort: 10001, + BindAddress: net.ParseIP("192.168.4.21"), + ServerCert: apiserveroptions.GeneratableKeyCert{ + CertDirectory: "/a/b/c", + PairName: "kube-controller-manager", + }, + }, InsecureServing: &cmoptions.InsecureServingOptions{ BindAddress: net.ParseIP("192.168.4.10"), BindPort: int(10000), diff --git a/pkg/master/ports/ports.go b/pkg/master/ports/ports.go index fae2d6225d3..d4a42f603bb 100644 --- a/pkg/master/ports/ports.go +++ b/pkg/master/ports/ports.go @@ -28,7 +28,7 @@ const ( SchedulerPort = 10251 // InsecureKubeControllerManagerPort is the default port for the controller manager status server. // May be overridden by a flag at startup. 
- InsecureControllerManagerPort = 10252 + InsecureKubeControllerManagerPort = 10252 // InsecureCloudControllerManagerPort is the default port for the cloud controller manager server. // This value may be overridden by a flag at startup. InsecureCloudControllerManagerPort = 10253 From cecd663c21d139a3a5a15b43a8dda8de26180246 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 8 Feb 2018 14:19:02 +0100 Subject: [PATCH 45/53] controller-manager: add authz/n to options, nil by default --- cmd/controller-manager/app/config.go | 2 ++ cmd/controller-manager/app/options/options.go | 14 ++++++++++++++ cmd/controller-manager/app/serve.go | 16 +++++++++++++++- .../pkg/server/options/authentication.go | 4 ++++ 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/cmd/controller-manager/app/config.go b/cmd/controller-manager/app/config.go index b62550e3900..d67841562e3 100644 --- a/cmd/controller-manager/app/config.go +++ b/cmd/controller-manager/app/config.go @@ -32,6 +32,8 @@ type Config struct { SecureServing *apiserver.SecureServingInfo // TODO: remove deprecated insecure serving InsecureServing *InsecureServingInfo + Authentication apiserver.AuthenticationInfo + Authorization apiserver.AuthorizationInfo // the general kube client Client *clientset.Clientset diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index 82393c7ba6a..8f468f4d1cc 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -48,6 +48,8 @@ type GenericControllerManagerOptions struct { SecureServing *apiserveroptions.SecureServingOptions // TODO: remove insecure serving mode InsecureServing *InsecureServingOptions + Authentication *apiserveroptions.DelegatingAuthenticationOptions + Authorization *apiserveroptions.DelegatingAuthorizationOptions Master string Kubeconfig string @@ -75,6 +77,8 @@ func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeCont BindPort: int(componentConfig.Port), BindNetwork: "tcp", }, + Authentication: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthenticationOptions() + Authorization: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthorizationOptions() } // disable secure serving for now @@ -175,6 +179,8 @@ func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) { o.SecureServing.AddFlags(fs) o.InsecureServing.AddFlags(fs) o.InsecureServing.AddDeprecatedFlags(fs) + o.Authentication.AddFlags(fs) + o.Authorization.AddFlags(fs) } // ApplyTo fills up controller manager config with options and userAgent @@ -187,6 +193,12 @@ func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Co if err := o.InsecureServing.ApplyTo(&c.InsecureServing, &c.ComponentConfig); err != nil { return err } + if err := o.Authentication.ApplyTo(&c.Authentication, c.SecureServing, nil); err != nil { + return err + } + if err := o.Authorization.ApplyTo(&c.Authorization); err != nil { + return err + } var err error c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig) @@ -214,6 +226,8 @@ func (o *GenericControllerManagerOptions) Validate() []error { errors := []error{} errors = append(errors, o.SecureServing.Validate()...) errors = append(errors, o.InsecureServing.Validate()...) + errors = append(errors, o.Authentication.Validate()...) + errors = append(errors, o.Authorization.Validate()...) 
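Validate follows the same additive contract for every option group: each group returns a []error (nil when satisfied), the caller concatenates the slices, and only the final aggregate decides success. A small sketch of that shape; only the apimachinery aggregate helper is assumed:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// validateAll concatenates each group's errors and collapses them into
// a single error (nil when every group passed).
func validateAll(groups ...func() []error) error {
	var all []error
	for _, g := range groups {
		all = append(all, g()...)
	}
	return utilerrors.NewAggregate(all)
}

func main() {
	err := validateAll(
		func() []error { return nil }, // e.g. insecure serving: nothing to report
		func() []error { return []error{errors.New("secure-port out of range")} },
	)
	fmt.Println(err)
}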
// TODO: validate component config, master and kubeconfig diff --git a/cmd/controller-manager/app/serve.go b/cmd/controller-manager/app/serve.go index 93c8aad43d1..f75777ed266 100644 --- a/cmd/controller-manager/app/serve.go +++ b/cmd/controller-manager/app/serve.go @@ -24,7 +24,11 @@ import ( "github.com/prometheus/client_golang/prometheus" + genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/util/configz" ) @@ -47,5 +51,15 @@ func Serve(c *CompletedConfig, serveFunc serveFunc, stopCh <-chan struct{}) erro configz.InstallHandler(mux) mux.Handle("/metrics", prometheus.Handler()) - return serveFunc(mux, 0, stopCh) + requestContextMapper := apirequest.NewRequestContextMapper() + requestInfoResolver := &apirequest.RequestInfoFactory{} + failedHandler := genericapifilters.Unauthorized(requestContextMapper, legacyscheme.Codecs, false) + + handler := genericapifilters.WithAuthorization(mux, requestContextMapper, c.Authorization.Authorizer, legacyscheme.Codecs) + handler = genericapifilters.WithAuthentication(handler, requestContextMapper, c.Authentication.Authenticator, failedHandler) + handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver, requestContextMapper) + handler = apirequest.WithRequestContext(handler, requestContextMapper) + handler = genericfilters.WithPanicRecovery(handler) + + return serveFunc(handler, 0, stopCh) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index c516a6bba42..04e1ea815e7 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -131,6 +131,10 @@ func (s *DelegatingAuthenticationOptions) Validate() []error { } func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + fs.StringVar(&s.RemoteKubeConfigFile, "authentication-kubeconfig", s.RemoteKubeConfigFile, ""+ "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ "tokenaccessreviews.authentication.k8s.io.") From 5483ab7679dd055422131fd1c22a18eee39a775e Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 8 Feb 2018 19:37:08 +0100 Subject: [PATCH 46/53] Update generated files --- cmd/BUILD | 2 +- cmd/cloud-controller-manager/app/BUILD | 9 ++-- cmd/cloud-controller-manager/app/config/BUILD | 23 ++++++++++ .../app/options/BUILD | 3 ++ cmd/controller-manager/app/BUILD | 44 +++++++++++++++++++ cmd/controller-manager/app/options/BUILD | 15 ++++++- cmd/kube-controller-manager/app/BUILD | 8 ++-- cmd/kube-controller-manager/app/config/BUILD | 23 ++++++++++ cmd/kube-controller-manager/app/options/BUILD | 2 + .../k8s.io/apiserver/pkg/server/options/BUILD | 1 + 10 files changed, 117 insertions(+), 13 deletions(-) create mode 100644 cmd/cloud-controller-manager/app/config/BUILD create mode 100644 cmd/controller-manager/app/BUILD create mode 100644 cmd/kube-controller-manager/app/config/BUILD diff --git a/cmd/BUILD b/cmd/BUILD index 1d4c2545ec8..3e0beea93d2 100644 --- a/cmd/BUILD +++ b/cmd/BUILD @@ -13,7 +13,7 @@ filegroup( ":package-srcs", "//cmd/clicheck:all-srcs", "//cmd/cloud-controller-manager:all-srcs", - "//cmd/controller-manager/app/options:all-srcs", + "//cmd/controller-manager/app:all-srcs", "//cmd/gendocs:all-srcs", "//cmd/genkubedocs:all-srcs", "//cmd/genman:all-srcs", diff --git a/cmd/cloud-controller-manager/app/BUILD b/cmd/cloud-controller-manager/app/BUILD index 57ed9bf0500..64cae62d6f1 100644 --- a/cmd/cloud-controller-manager/app/BUILD +++ b/cmd/cloud-controller-manager/app/BUILD @@ -10,8 +10,9 @@ go_library( srcs = ["controllermanager.go"], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app", deps = [ + "//cmd/cloud-controller-manager/app/config:go_default_library", "//cmd/cloud-controller-manager/app/options:go_default_library", - "//pkg/api/legacyscheme:go_default_library", + "//cmd/controller-manager/app:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/cloud:go_default_library", @@ -20,17 +21,12 @@ go_library( "//pkg/util/configz:go_default_library", "//pkg/version/verflag:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", @@ -48,6 +44,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//cmd/cloud-controller-manager/app/config:all-srcs", "//cmd/cloud-controller-manager/app/options:all-srcs", ], tags = ["automanaged"], diff --git a/cmd/cloud-controller-manager/app/config/BUILD b/cmd/cloud-controller-manager/app/config/BUILD new file mode 100644 index 00000000000..5f4ce185b20 --- /dev/null +++ b/cmd/cloud-controller-manager/app/config/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = 
"go_default_library", + srcs = ["config.go"], + importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config", + visibility = ["//visibility:public"], + deps = ["//cmd/controller-manager/app:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index 9d9c0bb19cd..7191ee5cbc9 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -11,12 +11,14 @@ go_library( srcs = ["options.go"], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options", deps = [ + "//cmd/cloud-controller-manager/app/config:go_default_library", "//cmd/controller-manager/app/options:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -45,5 +47,6 @@ go_test( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", ], ) diff --git a/cmd/controller-manager/app/BUILD b/cmd/controller-manager/app/BUILD new file mode 100644 index 00000000000..69e01c266c6 --- /dev/null +++ b/cmd/controller-manager/app/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "insecure_serving.go", + "serve.go", + ], + importpath = "k8s.io/kubernetes/cmd/controller-manager/app", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/util/configz:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/filters:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cmd/controller-manager/app/options:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cmd/controller-manager/app/options/BUILD b/cmd/controller-manager/app/options/BUILD index 2ad13dea877..b449fb9c297 100644 --- a/cmd/controller-manager/app/options/BUILD +++ b/cmd/controller-manager/app/options/BUILD @@ -2,15 +2,28 @@ 
load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["utils.go"], + srcs = [ + "insecure_serving.go", + "options.go", + ], importpath = "k8s.io/kubernetes/cmd/controller-manager/app/options", visibility = ["//visibility:public"], deps = [ + "//cmd/controller-manager/app:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", "//vendor/github.com/cloudflare/cfssl/helpers:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index ba84f354501..7df78fa24c6 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -25,7 +25,9 @@ go_library( ], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app", deps = [ + "//cmd/controller-manager/app:go_default_library", "//cmd/controller-manager/app/options:go_default_library", + "//cmd/kube-controller-manager/app/config:go_default_library", "//cmd/kube-controller-manager/app/options:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", @@ -110,7 +112,6 @@ go_library( "//pkg/volume/util:go_default_library", "//pkg/volume/vsphere_volume:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -119,20 +120,16 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/discovery/cached:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/scale:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library", 
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library", @@ -150,6 +147,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//cmd/kube-controller-manager/app/config:all-srcs", "//cmd/kube-controller-manager/app/options:all-srcs", ], tags = ["automanaged"], diff --git a/cmd/kube-controller-manager/app/config/BUILD b/cmd/kube-controller-manager/app/config/BUILD new file mode 100644 index 00000000000..a9c61b0abad --- /dev/null +++ b/cmd/kube-controller-manager/app/config/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["config.go"], + importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/config", + visibility = ["//visibility:public"], + deps = ["//cmd/controller-manager/app:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index cd814a31116..37e9a647f80 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -12,6 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", deps = [ "//cmd/controller-manager/app/options:go_default_library", + "//cmd/kube-controller-manager/app/config:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/controller/garbagecollector:go_default_library", @@ -49,5 +50,6 @@ go_test( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD index ea27ab891fa..07879567272 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD @@ -65,6 +65,7 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) From 8a98c02afa3f1a9b16170a0fa969a6b1c434269b Mon Sep 17 00:00:00 2001 From: Aleksandra Malinowska Date: Tue, 13 Feb 2018 12:43:24 +0100 Subject: [PATCH 47/53] Increase timeout on waiting on cluster resize in autoscaling tests --- test/e2e/autoscaling/cluster_size_autoscaling.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index ec1dee63a79..f599f789462 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -54,6 +54,7 @@ import ( const ( defaultTimeout = 3 * time.Minute resizeTimeout = 5 * time.Minute + manualResizeTimeout = 6 * time.Minute scaleUpTimeout = 5 * time.Minute scaleUpTriggerTimeout = 2 * time.Minute scaleDownTimeout = 20 * time.Minute @@ -445,7 +446,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Waiting for new node to appear and 
annotating it") framework.WaitForGroupSize(minMig, int32(minSize+1)) - // Verify, that cluster size is increased + // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) @@ -537,7 +538,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } setMigSizes(newSizes) framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, - func(size int) bool { return size >= increasedSize }, scaleUpTimeout, unready)) + func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready)) By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, @@ -1778,7 +1779,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin return false } - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, scaleUpTimeout)) + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout)) return increasedSize } From a6c43c6a5ca7cc4449684d5e68d73773be91cd41 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Mon, 29 Jan 2018 11:58:23 +0800 Subject: [PATCH 48/53] pass listener in integration test to prevent port in use flake --- .../apiserver/pkg/server/options/serving.go | 11 +- staging/src/k8s.io/kube-aggregator/main.go | 3 +- .../kube-aggregator/pkg/cmd/server/start.go | 6 +- staging/src/k8s.io/sample-apiserver/main.go | 3 +- .../sample-apiserver/pkg/cmd/server/start.go | 6 +- .../etcd/etcd_storage_path_test.go | 63 +++--- test/integration/examples/apiserver_test.go | 206 +++++++++--------- test/integration/framework/master_utils.go | 23 -- test/integration/tls/ciphers_test.go | 9 +- 9 files changed, 154 insertions(+), 176 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go index edf2b02f5cf..0e239e78ca5 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go @@ -35,7 +35,8 @@ import ( type SecureServingOptions struct { BindAddress net.IP - BindPort int + // BindPort is ignored when Listener is set, will serve https even with 0. + BindPort int // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", // "tcp4", and "tcp6". 
BindNetwork string @@ -160,7 +161,7 @@ func (s *SecureServingOptions) ApplyTo(c *server.Config) error { if s == nil { return nil } - if s.BindPort <= 0 { + if s.BindPort <= 0 && s.Listener == nil { return nil } @@ -171,6 +172,12 @@ func (s *SecureServingOptions) ApplyTo(c *server.Config) error { if err != nil { return fmt.Errorf("failed to create listener: %v", err) } + } else { + if _, ok := s.Listener.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("failed to parse ip and port from listener") + } + s.BindPort = s.Listener.Addr().(*net.TCPAddr).Port + s.BindAddress = s.Listener.Addr().(*net.TCPAddr).IP } if err := s.applyServingInfoTo(c); err != nil { diff --git a/staging/src/k8s.io/kube-aggregator/main.go b/staging/src/k8s.io/kube-aggregator/main.go index 7c1e88664fe..3be730e47a6 100644 --- a/staging/src/k8s.io/kube-aggregator/main.go +++ b/staging/src/k8s.io/kube-aggregator/main.go @@ -44,7 +44,8 @@ func main() { } stopCh := genericapiserver.SetupSignalHandler() - cmd := server.NewCommandStartAggregator(os.Stdout, os.Stderr, stopCh) + options := server.NewDefaultOptions(os.Stdout, os.Stderr) + cmd := server.NewCommandStartAggregator(options, stopCh) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { glog.Fatal(err) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go index 83c83aa3bf8..6d3ec440c76 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go @@ -49,9 +49,9 @@ type AggregatorOptions struct { } // NewCommandStartAggregator provides a CLI handler for 'start master' command -func NewCommandStartAggregator(out, err io.Writer, stopCh <-chan struct{}) *cobra.Command { - o := NewDefaultOptions(out, err) - +// with a default AggregatorOptions. +func NewCommandStartAggregator(defaults *AggregatorOptions, stopCh <-chan struct{}) *cobra.Command { + o := *defaults cmd := &cobra.Command{ Short: "Launch a API aggregator and proxy server", Long: "Launch a API aggregator and proxy server", diff --git a/staging/src/k8s.io/sample-apiserver/main.go b/staging/src/k8s.io/sample-apiserver/main.go index 2c3c5a10d2b..f05327d980a 100644 --- a/staging/src/k8s.io/sample-apiserver/main.go +++ b/staging/src/k8s.io/sample-apiserver/main.go @@ -37,7 +37,8 @@ func main() { } stopCh := genericapiserver.SetupSignalHandler() - cmd := server.NewCommandStartWardleServer(os.Stdout, os.Stderr, stopCh) + options := server.NewWardleServerOptions(os.Stdout, os.Stderr) + cmd := server.NewCommandStartWardleServer(options, stopCh) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { glog.Fatal(err) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index dbf6022b540..f8715db3478 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -57,9 +57,9 @@ func NewWardleServerOptions(out, errOut io.Writer) *WardleServerOptions { } // NewCommandStartWardleServer provides a CLI handler for 'start master' command -func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) *cobra.Command { - o := NewWardleServerOptions(out, errOut) - +// with a default WardleServerOptions. 
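The constructor change that follows is the core of the flake fix: NewCommandStartWardleServer (and its aggregator twin) now accept a pre-built options struct instead of constructing one internally, so an integration test can plant an already-bound listener in the options before the command runs. A toy version of the pattern, assuming only cobra; the option type and field are hypothetical:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

type toyOptions struct{ Port int }

// newCommand mirrors the refactor: the caller supplies defaults, and
// the command copies them so it owns its own options instance.
func newCommand(defaults *toyOptions) *cobra.Command {
	o := *defaults
	return &cobra.Command{
		Use: "toy",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("running with port", o.Port)
			return nil
		},
	}
}

func main() {
	// Port 0 = kernel-assigned, as in the listener-based flake fix.
	cmd := newCommand(&toyOptions{Port: 0})
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}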
+func NewCommandStartWardleServer(defaults *WardleServerOptions, stopCh <-chan struct{}) *cobra.Command { + o := *defaults cmd := &cobra.Command{ Short: "Launch a wardle API server", Long: "Launch a wardle API server", diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go index 6a5b6998008..16665451dd5 100644 --- a/test/integration/etcd/etcd_storage_path_test.go +++ b/test/integration/etcd/etcd_storage_path_test.go @@ -41,6 +41,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/storage/storagebackend" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -704,47 +705,41 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV } }() - for { - kubeAPIServerOptions := options.NewServerRunOptions() - kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") - kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir - kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} - kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf? - kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange - kubeAPIServerOptions.Authorization.Mode = "RBAC" + listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } - // always get a fresh port in case something claimed the old one - kubePort, err := framework.FindFreeLocalPort() - if err != nil { - t.Fatal(err) - } + kubeAPIServerOptions := options.NewServerRunOptions() - kubeAPIServerOptions.SecureServing.BindPort = kubePort + kubeAPIServerOptions.SecureServing.Listener = listener + kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir + kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} + kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf? 
+ kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange + kubeAPIServerOptions.Authorization.Mode = "RBAC" - tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions) - if err != nil { - t.Fatal(err) - } - kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport) - if err != nil { - t.Fatal(err) - } + tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions) + if err != nil { + t.Fatal(err) + } + kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport) + if err != nil { + t.Fatal(err) + } - kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources + kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources - kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers) - if err != nil { - t.Fatal(err) - } + kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers) + if err != nil { + t.Fatal(err) + } - kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig) - storageConfigValue.Store(kubeAPIServerOptions.Etcd.StorageConfig) + kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig) + storageConfigValue.Store(kubeAPIServerOptions.Etcd.StorageConfig) - if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil { - t.Log(err) - } - - time.Sleep(time.Second) + if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil { + t.Fatal(err) } }() diff --git a/test/integration/examples/apiserver_test.go b/test/integration/examples/apiserver_test.go index 3e20e4af7dd..d24ff797f22 100644 --- a/test/integration/examples/apiserver_test.go +++ b/test/integration/examples/apiserver_test.go @@ -22,9 +22,9 @@ import ( "fmt" "io/ioutil" "net" + "net/http" "os" "path" - "strconv" "sync/atomic" "testing" "time" @@ -34,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" client "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -90,52 +91,48 @@ func TestAggregatedAPIServer(t *testing.T) { kubeClientConfigValue := atomic.Value{} go func() { - for { - // always get a fresh port in case something claimed the old one - kubePort, err := framework.FindFreeLocalPort() - if err != nil { - t.Fatal(err) - } + listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } - kubeAPIServerOptions := options.NewServerRunOptions() - kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") - kubeAPIServerOptions.SecureServing.BindPort = kubePort - kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir - kubeAPIServerOptions.InsecureServing.BindPort = 0 - kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} - kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange - kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"} - 
kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"} - kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"} - kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"} - kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name() - kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name() - kubeAPIServerOptions.Authorization.Mode = "RBAC" + kubeAPIServerOptions := options.NewServerRunOptions() + kubeAPIServerOptions.SecureServing.Listener = listener + kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") + kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir + kubeAPIServerOptions.InsecureServing.BindPort = 0 + kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} + kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange + kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"} + kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"} + kubeAPIServerOptions.Authentication.RequestHeader.ExtraHeaderPrefixes = []string{"X-Remote-Extra-"} + kubeAPIServerOptions.Authentication.RequestHeader.AllowedNames = []string{"kube-aggregator"} + kubeAPIServerOptions.Authentication.RequestHeader.ClientCAFile = proxyCACertFile.Name() + kubeAPIServerOptions.Authentication.ClientCert.ClientCA = clientCACertFile.Name() + kubeAPIServerOptions.Authorization.Mode = "RBAC" - tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions) - if err != nil { - t.Fatal(err) - } - kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport) - if err != nil { - t.Fatal(err) - } - // Adjust the loopback config for external use (external server name and CA) - kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig) - kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt") - kubeAPIServerClientConfig.CAData = nil - kubeAPIServerClientConfig.ServerName = "" - kubeClientConfigValue.Store(kubeAPIServerClientConfig) + tunneler, proxyTransport, err := app.CreateNodeDialer(kubeAPIServerOptions) + if err != nil { + t.Fatal(err) + } + kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport) + if err != nil { + t.Fatal(err) + } + // Adjust the loopback config for external use (external server name and CA) + kubeAPIServerClientConfig := rest.CopyConfig(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig) + kubeAPIServerClientConfig.CAFile = path.Join(certDir, "apiserver.crt") + kubeAPIServerClientConfig.CAData = nil + kubeAPIServerClientConfig.ServerName = "" + kubeClientConfigValue.Store(kubeAPIServerClientConfig) - kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers) - if err != nil { - t.Fatal(err) - } + kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers) + if err != nil { + t.Fatal(err) + } - if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil { - t.Log(err) - } - time.Sleep(100 * time.Millisecond) + if err := 
kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil { + t.Fatal(err) } }() @@ -154,9 +151,13 @@ func TestAggregatedAPIServer(t *testing.T) { t.Log(err) return false, nil } - if _, err := kubeClient.Discovery().ServerVersion(); err != nil { + + healthStatus := 0 + kubeClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus) + if healthStatus != http.StatusOK { return false, nil } + return true, nil }) if err != nil { @@ -177,32 +178,30 @@ func TestAggregatedAPIServer(t *testing.T) { // start the wardle server to prove we can aggregate it go func() { - for { - // always get a fresh port in case something claimed the old one - wardlePortInt, err := framework.FindFreeLocalPort() - if err != nil { - t.Fatal(err) - } - atomic.StoreInt32(wardlePort, int32(wardlePortInt)) - wardleCmd := sampleserver.NewCommandStartWardleServer(os.Stdout, os.Stderr, stopCh) - wardleCmd.SetArgs([]string{ - "--bind-address", "127.0.0.1", - "--secure-port", strconv.Itoa(wardlePortInt), - "--requestheader-username-headers=X-Remote-User", - "--requestheader-group-headers=X-Remote-Group", - "--requestheader-extra-headers-prefix=X-Remote-Extra-", - "--requestheader-client-ca-file=" + proxyCACertFile.Name(), - "--requestheader-allowed-names=kube-aggregator", - "--authentication-kubeconfig", kubeconfigFile.Name(), - "--authorization-kubeconfig", kubeconfigFile.Name(), - "--etcd-servers", framework.GetEtcdURL(), - "--cert-dir", wardleCertDir, - "--kubeconfig", kubeconfigFile.Name(), - }) - if err := wardleCmd.Execute(); err != nil { - t.Log(err) - } - time.Sleep(100 * time.Millisecond) + listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + atomic.StoreInt32(wardlePort, int32(port)) + + o := sampleserver.NewWardleServerOptions(os.Stdout, os.Stderr) + o.RecommendedOptions.SecureServing.Listener = listener + o.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") + wardleCmd := sampleserver.NewCommandStartWardleServer(o, stopCh) + wardleCmd.SetArgs([]string{ + "--requestheader-username-headers=X-Remote-User", + "--requestheader-group-headers=X-Remote-Group", + "--requestheader-extra-headers-prefix=X-Remote-Extra-", + "--requestheader-client-ca-file=" + proxyCACertFile.Name(), + "--requestheader-allowed-names=kube-aggregator", + "--authentication-kubeconfig", kubeconfigFile.Name(), + "--authorization-kubeconfig", kubeconfigFile.Name(), + "--etcd-servers", framework.GetEtcdURL(), + "--cert-dir", wardleCertDir, + "--kubeconfig", kubeconfigFile.Name(), + }) + if err := wardleCmd.Execute(); err != nil { + t.Fatal(err) } }() @@ -220,8 +219,9 @@ func TestAggregatedAPIServer(t *testing.T) { t.Log(err) return false, nil } - if _, err := wardleClient.Discovery().ServerVersion(); err != nil { - t.Log(err) + healthStatus := 0 + wardleClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus) + if healthStatus != http.StatusOK { return false, nil } return true, nil @@ -255,30 +255,29 @@ func TestAggregatedAPIServer(t *testing.T) { aggregatorPort := new(int32) go func() { - for { - // always get a fresh port in case something claimed the old one - aggregatorPortInt, err := framework.FindFreeLocalPort() - if err != nil { - t.Fatal(err) - } - atomic.StoreInt32(aggregatorPort, int32(aggregatorPortInt)) - aggregatorCmd := kubeaggregatorserver.NewCommandStartAggregator(os.Stdout, os.Stderr, stopCh) - aggregatorCmd.SetArgs([]string{ - "--bind-address", "127.0.0.1", - 
"--secure-port", strconv.Itoa(aggregatorPortInt), - "--requestheader-username-headers", "", - "--proxy-client-cert-file", proxyClientCertFile.Name(), - "--proxy-client-key-file", proxyClientKeyFile.Name(), - "--kubeconfig", kubeconfigFile.Name(), - "--authentication-kubeconfig", kubeconfigFile.Name(), - "--authorization-kubeconfig", kubeconfigFile.Name(), - "--etcd-servers", framework.GetEtcdURL(), - "--cert-dir", aggregatorCertDir, - }) - if err := aggregatorCmd.Execute(); err != nil { - t.Log(err) - } - time.Sleep(100 * time.Millisecond) + listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + atomic.StoreInt32(aggregatorPort, int32(port)) + + o := kubeaggregatorserver.NewDefaultOptions(os.Stdout, os.Stderr) + o.RecommendedOptions.SecureServing.Listener = listener + o.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") + aggregatorCmd := kubeaggregatorserver.NewCommandStartAggregator(o, stopCh) + aggregatorCmd.SetArgs([]string{ + "--requestheader-username-headers", "", + "--proxy-client-cert-file", proxyClientCertFile.Name(), + "--proxy-client-key-file", proxyClientKeyFile.Name(), + "--kubeconfig", kubeconfigFile.Name(), + "--authentication-kubeconfig", kubeconfigFile.Name(), + "--authorization-kubeconfig", kubeconfigFile.Name(), + "--etcd-servers", framework.GetEtcdURL(), + "--cert-dir", aggregatorCertDir, + }) + + if err := aggregatorCmd.Execute(); err != nil { + t.Fatal(err) } }() @@ -295,7 +294,9 @@ func TestAggregatedAPIServer(t *testing.T) { // this happens if we race the API server for writing the cert return false, nil } - if _, err := aggregatorDiscoveryClient.Discovery().ServerVersion(); err != nil { + healthStatus := 0 + aggregatorDiscoveryClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus) + if healthStatus != http.StatusOK { return false, nil } return true, nil @@ -304,7 +305,7 @@ func TestAggregatedAPIServer(t *testing.T) { t.Fatal(err) } - // now we're finally ready to test. These are what's run by defautl now + // now we're finally ready to test. These are what's run by default now testAPIGroupList(t, wardleClient.Discovery().RESTClient()) testAPIGroup(t, wardleClient.Discovery().RESTClient()) testAPIResourceList(t, wardleClient.Discovery().RESTClient()) @@ -342,8 +343,8 @@ func TestAggregatedAPIServer(t *testing.T) { _, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{ ObjectMeta: metav1.ObjectMeta{Name: "v1."}, Spec: apiregistrationv1beta1.APIServiceSpec{ - // register this as a loca service so it doesn't try to lookup the default kubernetes service - // which will have an unroutable IP address since its fake. + // register this as a local service so it doesn't try to lookup the default kubernetes service + // which will have an unroutable IP address since it's fake. 
Group: "", Version: "v1", GroupPriorityMinimum: 100, @@ -460,8 +461,3 @@ func testAPIResourceList(t *testing.T, client rest.Interface) { assert.Equal(t, "flunders", apiResourceList.APIResources[1].Name) assert.True(t, apiResourceList.APIResources[1].Namespaced) } - -const ( - policyCachePollInterval = 100 * time.Millisecond - policyCachePollTimeout = 5 * time.Second -) diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 27c9ef5cc2f..719b1e4f723 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -21,7 +21,6 @@ import ( "net/http" "net/http/httptest" "path" - "strconv" "time" "github.com/go-openapi/spec" @@ -328,28 +327,6 @@ func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, mast return startMasterOrDie(masterConfig, s, masterReceiver) } -// FindFreeLocalPort returns the number of an available port number on -// the loopback interface. Useful for determining the port to launch -// a server on. Error handling required - there is a non-zero chance -// that the returned port number will be bound by another process -// after this function returns. -func FindFreeLocalPort() (int, error) { - l, err := net.Listen("tcp", ":0") - if err != nil { - return 0, err - } - defer l.Close() - _, portStr, err := net.SplitHostPort(l.Addr().String()) - if err != nil { - return 0, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return 0, err - } - return port, nil -} - // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix. func SharedEtcd() *storagebackend.Config { cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil) diff --git a/test/integration/tls/ciphers_test.go b/test/integration/tls/ciphers_test.go index 3945711011d..8d4ab5fea0f 100644 --- a/test/integration/tls/ciphers_test.go +++ b/test/integration/tls/ciphers_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" client "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -44,17 +45,17 @@ func runBasicSecureAPIServer(t *testing.T, ciphers []string) (uint32, error) { var kubePort uint32 go func() { - // always get a fresh port in case something claimed the old one - freePort, err := framework.FindFreeLocalPort() + listener, port, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) } - atomic.StoreUint32(&kubePort, uint32(freePort)) + atomic.StoreUint32(&kubePort, uint32(port)) kubeAPIServerOptions := options.NewServerRunOptions() kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") - kubeAPIServerOptions.SecureServing.BindPort = freePort + kubeAPIServerOptions.SecureServing.BindPort = port + kubeAPIServerOptions.SecureServing.Listener = listener kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir kubeAPIServerOptions.SecureServing.CipherSuites = ciphers kubeAPIServerOptions.InsecureServing.BindPort = 0 From 468b8bf021cc6488ce16cfad1f471c47021ce78d Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Mon, 29 Jan 2018 11:58:47 +0800 Subject: [PATCH 49/53] run update bazel --- test/integration/etcd/BUILD | 1 + test/integration/examples/BUILD | 1 + test/integration/tls/BUILD | 1 + 3 files changed, 3 insertions(+) diff --git a/test/integration/etcd/BUILD b/test/integration/etcd/BUILD index 
ff5d3e24b43..7095ac27c45 100644 --- a/test/integration/etcd/BUILD +++ b/test/integration/etcd/BUILD @@ -36,6 +36,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/test/integration/examples/BUILD b/test/integration/examples/BUILD index f8a18904b5b..595750032ce 100644 --- a/test/integration/examples/BUILD +++ b/test/integration/examples/BUILD @@ -22,6 +22,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/test/integration/tls/BUILD b/test/integration/tls/BUILD index b5f331bedef..db8c1108a6d 100644 --- a/test/integration/tls/BUILD +++ b/test/integration/tls/BUILD @@ -15,6 +15,7 @@ go_test( "//test/integration/framework:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], From c1643e5871130fd6477125c1305537df09ad847c Mon Sep 17 00:00:00 2001 From: talk2vino Date: Fri, 9 Feb 2018 22:55:26 +0530 Subject: [PATCH 50/53] libffi-dev dependency added in fluent-es-image Dockerfile to solve the docker build error --- .../addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile index 25c8a120271..ccf2d3812e1 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile @@ -31,7 +31,7 @@ COPY Gemfile /Gemfile # 2. Install fluentd via ruby. # 3. Remove build dependencies. # 4. Cleanup leftover caches & files. 
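+# libffi-dev is presumably what gems with native extensions (such as ffi) need
+# in order to compile; without it the fluentd install step fails to build.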
-RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \ +RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev libffi-dev" \ && clean-install $BUILD_DEPS \ ca-certificates \ libjemalloc1 \ From 139c62c3e9c6b7f5553cc7df62bcf7a6bc230e23 Mon Sep 17 00:00:00 2001 From: Shawn Hsiao Date: Tue, 13 Feb 2018 12:10:02 -0500 Subject: [PATCH 51/53] kubectl port-forward allows using resource name to select a matching pod --- pkg/kubectl/cmd/portforward.go | 71 +++++++++++++++++++++-------- pkg/kubectl/cmd/portforward_test.go | 5 +- 2 files changed, 54 insertions(+), 22 deletions(-) diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go index cf39bf637ab..9435ea412c2 100644 --- a/pkg/kubectl/cmd/portforward.go +++ b/pkg/kubectl/cmd/portforward.go @@ -23,6 +23,7 @@ import ( "net/url" "os" "os/signal" + "time" "github.com/spf13/cobra" @@ -51,15 +52,32 @@ type PortForwardOptions struct { } var ( + portforwardLong = templates.LongDesc(i18n.T(` + Forward one or more local ports to a pod. + + Use resource type/name such as deployment/mydeployment to select a pod. Resource type defaults to 'pod' if omitted. + + If there are multiple pods matching the criteria, a pod will be selected automatically. The + forwarding session ends when the selected pod terminates, and rerun of the command is needed + to resume forwarding.`)) + portforwardExample = templates.Examples(i18n.T(` # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod - kubectl port-forward mypod 5000 6000 + kubectl port-forward pod/mypod 5000 6000 + + # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment + kubectl port-forward deployment/mydeployment 5000 6000 # Listen on port 8888 locally, forwarding to 5000 in the pod - kubectl port-forward mypod 8888:5000 + kubectl port-forward pod/mypod 8888:5000 # Listen on a random port locally, forwarding to 5000 in the pod - kubectl port-forward mypod :5000`)) + kubectl port-forward pod/mypod :5000`)) +) + +const ( + // Amount of time to wait until at least one pod is running + defaultPodPortForwardWaitTimeout = 60 * time.Second ) func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Command { @@ -70,10 +88,10 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma }, } cmd := &cobra.Command{ - Use: "port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", + Use: "port-forward TYPE/NAME [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", DisableFlagsInUseLine: true, Short: i18n.T("Forward one or more local ports to a pod"), - Long: "Forward one or more local ports to a pod.", + Long: portforwardLong, Example: portforwardExample, Run: func(cmd *cobra.Command, args []string) { if err := opts.Complete(f, cmd, args); err != nil { @@ -87,7 +105,7 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma } }, } - cmd.Flags().StringP("pod", "p", "", "Pod name") + cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodPortForwardWaitTimeout) // TODO support UID return cmd } @@ -116,17 +134,8 @@ func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts Po // Complete completes all the required options for port-forward cmd. 
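+// It resolves the TYPE/NAME argument to a concrete pod via the resource
+// builder and AttachablePodForObject, and treats the remaining arguments as
+// port mappings.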
 func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	var err error
-	o.PodName = cmdutil.GetFlagString(cmd, "pod")
-	if len(o.PodName) == 0 && len(args) == 0 {
-		return cmdutil.UsageErrorf(cmd, "POD is required for port-forward")
-	}
-
-	if len(o.PodName) != 0 {
-		printDeprecationWarning("port-forward POD", "-p POD")
-		o.Ports = args
-	} else {
-		o.PodName = args[0]
-		o.Ports = args[1:]
+	if len(args) < 2 {
+		return cmdutil.UsageErrorf(cmd, "TYPE/NAME and list of ports are required for port-forward")
	}
 
	o.Namespace, _, err = f.DefaultNamespace()
@@ -134,6 +143,32 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg
		return err
	}
 
+	builder := f.NewBuilder().
+		Internal().
+		ContinueOnError().
+		NamespaceParam(o.Namespace).DefaultNamespace()
+
+	getPodTimeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd)
+	if err != nil {
+		return cmdutil.UsageErrorf(cmd, "%v", err)
+	}
+
+	resourceName := args[0]
+	builder.ResourceNames("pods", resourceName)
+
+	obj, err := builder.Do().Object()
+	if err != nil {
+		return err
+	}
+
+	forwardablePod, err := f.AttachablePodForObject(obj, getPodTimeout)
+	if err != nil {
+		return err
+	}
+
+	o.PodName = forwardablePod.Name
+	o.Ports = args[1:]
+
	clientset, err := f.ClientSet()
	if err != nil {
		return err
@@ -157,7 +192,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg
 // Validate validates all the required options for port-forward cmd.
 func (o PortForwardOptions) Validate() error {
	if len(o.PodName) == 0 {
-		return fmt.Errorf("pod name must be specified")
+		return fmt.Errorf("pod name or resource type/name must be specified")
	}
 
	if len(o.Ports) < 1 {
diff --git a/pkg/kubectl/cmd/portforward_test.go b/pkg/kubectl/cmd/portforward_test.go
index e2437bcb620..aac3fc0b20d 100644
--- a/pkg/kubectl/cmd/portforward_test.go
+++ b/pkg/kubectl/cmd/portforward_test.go
@@ -70,6 +70,7 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) {
	var err error
	f, tf, codec, ns := cmdtesting.NewAPIFactory()
	tf.Client = &fake.RESTClient{
+		VersionedAPIPath:     "/api/v1",
		GroupVersion:         schema.GroupVersion{Group: ""},
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
@@ -131,7 +132,3 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) {
 func TestPortForward(t *testing.T) {
	testPortForward(t, nil, []string{"foo", ":5000", ":1000"})
 }
-
-func TestPortForwardWithPFlag(t *testing.T) {
-	testPortForward(t, map[string]string{"pod": "foo"}, []string{":5000", ":1000"})
-}

From 377bff614d14cca20b4cadaa131c402f849e6082 Mon Sep 17 00:00:00 2001
From: Michelle Au
Date: Tue, 13 Feb 2018 10:06:27 -0800
Subject: [PATCH 52/53] add reviewers to util/mount

---
 pkg/util/mount/OWNERS | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pkg/util/mount/OWNERS b/pkg/util/mount/OWNERS
index 0c7ea81089b..8b4ff6218ea 100644
--- a/pkg/util/mount/OWNERS
+++ b/pkg/util/mount/OWNERS
@@ -1,6 +1,8 @@
 reviewers:
   - jingxu97
   - saad-ali
+  - jsafrane
+  - msau42
 approvers:
   - jingxu97
   - saad-ali

From 9238f38400ccf513fda9c728e1d41841df7e2c77 Mon Sep 17 00:00:00 2001
From: Davanum Srinivas
Date: Fri, 9 Feb 2018 17:59:02 -0500
Subject: [PATCH 53/53] local-up-cluster.sh should be conformant out-of-the-box

Rename ALLOW_SECURITY_CONTEXT to DENY_SECURITY_CONTEXT_ADMISSION to be in line
with the other admission plugins (like PSP_ADMISSION). Also make sure that this
plugin is not enabled by default.
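
For illustration, hypothetical invocations under the new behavior (only the
variable name comes from this patch):

    # SecurityContextDeny is no longer added to the admission chain by default:
    ./hack/local-up-cluster.sh

    # opt back in explicitly:
    DENY_SECURITY_CONTEXT_ADMISSION=true ./hack/local-up-cluster.sh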
--- hack/local-up-cluster.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index bd5417b7dfa..7b9ad89aad0 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -23,7 +23,7 @@ DOCKER_OPTS=${DOCKER_OPTS:-""} DOCKER=(docker ${DOCKER_OPTS}) DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""} ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""} -ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""} +DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""} PSP_ADMISSION=${PSP_ADMISSION:-""} NODE_ADMISSION=${NODE_ADMISSION:-""} RUNTIME_CONFIG=${RUNTIME_CONFIG:-""} @@ -418,7 +418,7 @@ function set_service_accounts { function start_apiserver { security_admission="" - if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then + if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then security_admission=",SecurityContextDeny" fi if [[ -n "${PSP_ADMISSION}" ]]; then